# Workflow: Eth spec tests (GitHub run #1355)
name: Eth spec tests
# Run tests every 3 hours or by manual request
on:
  schedule:
    - cron: '0 */3 * * *' # every 3 hours
  workflow_dispatch:
# Set default permissions
permissions:
  contents: read
  pull-requests: write
  checks: write
# Cancel workflow if a new run for the same commit has been triggered
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
# Use bash with -e and -x flags for all run steps
# for more visibility
defaults:
  run:
    shell: bash -ex {0}
# Common variables for all jobs
env:
  # Folded scalar (>): the newline-separated paths collapse into one
  # space-separated string, consumed by `for TEST_PATH in ${TEST_SET}`
  # loops in the jobs below.
  TEST_SET: >
    frontier/create
    frontier/opcodes
    frontier/precompiles
    frontier/scenarios
    homestead/coverage
    homestead/yul
    byzantium/eip198_modexp_precompile
    constantinople/eip1014_create2
    istanbul/eip1344_chainid
    berlin/eip2930_access_list
    paris/security
    shanghai/eip3651_warm_coinbase
    shanghai/eip3855_push0
    shanghai/eip3860_initcode
    cancun/eip1153_tstore
    cancun/eip5656_mcopy
    zkevm/test_worst_compute.py
jobs:
  # Run Eth spec tests on gETH node
  geth-tests:
    runs-on: matterlabs-ci-runner-high-performance
    env:
      L1_RPC_URL: http://localhost:8545
      # Quoted so YAML keeps the value a string
      CHAIN_ID: "1337"
    steps:
      - name: Checkout tests
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          repository: Romsters/boojumos-e2e-tests
          submodules: recursive
          ref: stable # use tag to pin a stable version
      - name: Install python
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        with:
          # Quoted: unquoted X.Y versions parse as YAML floats (e.g. 3.10 -> 3.1)
          python-version: "3.13"
      - name: Install pip dependencies
        run: pip install -r ./python-scripts/requirements.txt
      - name: Install gETH node
        run: |
          sudo add-apt-repository -y ppa:ethereum/ethereum
          sudo apt-get update
          sudo apt-get install ethereum --yes
      - name: Install yq
        env:
          YQ_DOWNLOAD_URL: https://github.com/mikefarah/yq/releases/download/v4.45.4/yq_linux_amd64
        run: |
          wget "${YQ_DOWNLOAD_URL}"
          mv yq_linux_amd64 yq
          chmod +x yq
          # Make the downloaded yq visible to later steps
          echo "${PWD}" >> "${GITHUB_PATH}"
      - name: Start gETH node
        run: |
          # Dev-mode node in the background; readiness is checked in the next step
          geth --dev --http --http.api eth,web3,net \
            --datadir data --miner.gaslimit 9000000000000 \
            --rpc.allow-unprotected-txs --mine &
      - name: Wait for geth to start
        run: |
          while ! nc -z localhost 8545; do
            echo "Waiting for geth to start..."
            sleep 3
          done
          echo "Geth is up and running."
      - name: Create test account
        run: |
          PASSWORD=testpassword
          echo "${PASSWORD}" > password.txt
          geth account new --datadir ./data --password password.txt
          KEYSTORE_FILE=$(ls ./data/keystore | tail -1)
          PRIVATE_KEY=$(python ./python-scripts/export-private-key.py "./data/keystore/${KEYSTORE_FILE}" "${PASSWORD}")
          # Export the key for the test-run step below
          echo "PRIVATE_KEY=${PRIVATE_KEY}" >> "${GITHUB_ENV}"
          sleep 5
      - name: Fund test account with ETH
        run: |
          # Fund test account with some ETH
          ACCOUNTS=$(curl -s -X POST \
            -H "Content-Type: application/json" \
            --data '{"jsonrpc":"2.0","method":"eth_accounts","params":[],"id":1}' \
            ${L1_RPC_URL})
          COINBASE=$(echo "${ACCOUNTS}" | jq -r '.result[0]')
          TEST_ACCOUNT=$(echo "${ACCOUNTS}" | jq -r '.result[1]')
          python ./python-scripts/fund-wallet.py --coinbase "${COINBASE}" --to "${TEST_ACCOUNT}" --eth 50000.0
      - name: Run spec tests on geth
        run: |
          # Setup tests
          export PATH=${HOME}/.local/bin:${PATH}
          ./scripts/setup-exec-tests.sh
          cd lib/execution-spec-tests
          # Prepare environment with chainId
          uv run eest make env
          yq e ".remote_nodes[0].chain_id = ${CHAIN_ID}" -i env.yaml
          # `|| true`: keep iterating even when a test set fails; results are
          # collected in the JUnit/JSON reports uploaded below.
          for TEST_PATH in ${TEST_SET}; do
            TEST_NAME="$(basename ${TEST_PATH})"
            uv run execute remote \
              -n auto \
              --fork Cancun \
              --rpc-endpoint "${L1_RPC_URL}" \
              --rpc-chain-id "${CHAIN_ID}" \
              --rpc-seed-key "${PRIVATE_KEY}" \
              --junitxml="junit-report-${TEST_NAME//\//-}.xml" \
              --json-report \
              "tests/${TEST_PATH}" || true
            mv .report.json "report-${TEST_NAME//\//-}.json"
          done
      - name: Upload test results
        if: (!cancelled())
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:
          name: test-results-geth
          path: |
            lib/execution-spec-tests/junit-report*.xml
            lib/execution-spec-tests/report*.json
# Run Eth spec tests on ZKsync OS node
zk-os-tests:
runs-on: matterlabs-ci-runner-high-performance
env:
RPC_URL: http://localhost:3050
CHAIN_ID: 6565
# Use hardcoded rich wallet
WALLET_ADDRESS: "0x36615Cf349d7F6344891B1e7CA7C72883F5dc049"
PRIVATE_KEY: "0x7726827caac94a7f9e1b160f7ea819f172f7b6f9d2a97f992c38edeab82d4110"
SERVER_LOGFILE: zksync-os-server.log.txt
TESTS_WORKING_DIR: zksync-os-e2e-tests
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup runner
uses: ./.github/actions/runner-setup
- name: Decompress anvil state
run: gzip -dfk ./local-chains/v30.2/l1-state.json.gz
- name: Run anvil L1
run: anvil --load-state ./local-chains/v30.2/l1-state.json --port 8545 &
- name: Build server
run: cargo build --release --bin zksync-os-server
- name: Run server
env:
# Do not use colored logs, otherwise logfile contains ANSI codes that are not human-readable
LOG_USE_COLOR: false
# Set pubdata price to constant value (10^9 wei), tests use constant gas limits and it doesn't work with big pubdata price.
FEE_PUBDATA_PRICE_OVERRIDE: "0x3B9ACA00"
run: cargo run --release --bin zksync-os-server > ${SERVER_LOGFILE} 2>&1 &
- name: Wait for zksync OS to start
run: |
TIMEOUT=300
INTERVAL=3
START_TIME=$(date +%s)
while ! nc -z localhost 3050; do
NOW=$(date +%s)
ELAPSED=$((NOW - START_TIME))
if [ "$ELAPSED" -ge "$TIMEOUT" ]; then
echo "Timed out after ${TIMEOUT}s waiting for zksync OS on port 3050"
exit 1
fi
echo "Waiting for ZKsync OS Server to start... (${ELAPSED}s elapsed)"
sleep $INTERVAL
done
echo "Sequencer is up and running."
- name: Checkout solidity tests
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
repository: antonbaliasnikov/solidity-tester
ref: main # use latest main branch to get test updates on early stages
path: solidity-tester
- name: Install NodeJS
uses: actions/setup-node@v5
with:
node-version: 22
- name: Run tests
working-directory: solidity-tester
run: cargo nextest run --retries 3
- name: Checkout tests
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
repository: Romsters/boojumos-e2e-tests
submodules: recursive
ref: stable # use tag to pin a stable version
path: ${{ env.TESTS_WORKING_DIR }}
- name: Run spec tests
working-directory: ${{ env.TESTS_WORKING_DIR }}
run: |
# Setup tests
export PATH="${HOME}/.local/bin:${PATH}"
./scripts/setup-exec-tests.sh
cast balance --rpc-url "${RPC_URL}" "${WALLET_ADDRESS}"
cd lib/execution-spec-tests
# Prepare environment with chainId
uv run eest make env
yq e ".remote_nodes[0].chain_id = ${CHAIN_ID}" -i env.yaml
# Run tests
for TEST_PATH in ${TEST_SET}; do
TEST_NAME="$(basename ${TEST_PATH})"
uv run execute remote \
--fork Cancun \
--rpc-endpoint "${RPC_URL}" \
--rpc-chain-id "${CHAIN_ID}" \
--rpc-seed-key "${PRIVATE_KEY}" \
--junitxml="junit-report-${TEST_NAME//\//-}.xml" \
--json-report \
"tests/${TEST_PATH}" || true
mv .report.json "report-${TEST_NAME//\//-}.json"
done
- name: Upload server logs
if: (!cancelled())
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: zksync-os-server-logs
path: ${{ env.SERVER_LOGFILE }}
- name: Upload test results
if: (!cancelled())
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: test-results-zk-os
path: |
${{ env.TESTS_WORKING_DIR }}/lib/execution-spec-tests/junit-report*.xml
${{ env.TESTS_WORKING_DIR }}/lib/execution-spec-tests/report*.json
- name: Publish Test Results
uses: EnricoMi/publish-unit-test-result-action@v2
if: (!cancelled())
with:
check_name: "ZK OS Execution Spec Tests"
files: "${{ env.TESTS_WORKING_DIR }}/lib/execution-spec-tests/junit-report*.xml"
compare_to_earlier_commit: false
action_fail: true # Fail the action if any of the tests failed
- name: Slack notification
if: ${{ failure() && github.ref == 'refs/heads/main' }}
uses: ./.github/actions/slack-notify
with:
webhook: ${{ secrets.SLACK_WEBHOOK_RELEASES }}
context: "ZKsync OS Server Eth Spec Tests scheduled run failed"
# Create a compatibility table from both test runs
# to compare results and durations
compatibility-table:
runs-on: ubuntu-latest
needs: [zk-os-tests, geth-tests]
steps:
- name: Download reports
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
with:
pattern: test-results-*
path: data
merge-multiple: 'false'
- name: Prepare markdown table
run: |
# Results directories
geth_dir="./data/test-results-geth"
zkos_dir="./data/test-results-zk-os"
# Extract test names from both dirs and combine unique sorted list
tests=($( (ls "${geth_dir}"/report-*.json 2>/dev/null; ls "${zkos_dir}"/report-*.json 2>/dev/null) \
| xargs -n1 basename \
| sed -E 's/report-(.*)\.json/\1/' \
| sort -u)
)
# Markdown header
echo "| Test Name | ZK OS (✅/❌) | ⏱️ Durations (Geth / ZK OS / Δ) |" > report.md
echo "|---------------------|--------------|----------------------------------|" >> report.md
# Totals for summary
total_tests=0
sum_geth=0
sum_zkos=0
sum_diff=0
zkos_passed_total=0
zkos_total_total=0
for test in "${tests[@]}"; do
geth_file="$geth_dir/report-$test.json"
zkos_file="$zkos_dir/report-$test.json"
# Defaults
geth_duration=0
zkos_duration=0
zkos_passed=0
zkos_total=0
if [[ -f "$geth_file" ]]; then
geth_duration=$(jq '.duration // 0' "$geth_file")
fi
if [[ -f "$zkos_file" ]]; then
zkos_passed=$(jq '.summary.passed // 0' "$zkos_file")
zkos_total=$(jq '.summary.total // 0' "$zkos_file")
zkos_duration=$(jq '.duration // 0' "$zkos_file")
fi
# Compute diff (ZK OS - Geth)
duration_diff=$(echo "$zkos_duration - $geth_duration" | bc -l)
# Arrow for diff
if (( $(echo "$duration_diff >= 0" | bc -l) )); then
diff_arrow="🔼" # slower
else
diff_arrow="🔽" # faster
fi
# ZK OS status emoji
if [[ "$zkos_total" -gt 0 && "$zkos_passed" -eq "$zkos_total" ]]; then
zkos_status="✅"
elif [[ "$zkos_passed" -gt 0 ]]; then
zkos_status="🟡"
else
zkos_status="❌"
fi
# Format numbers
geth_duration_fmt=$(printf "%.2f" "$geth_duration")
zkos_duration_fmt=$(printf "%.2f" "$zkos_duration")
duration_diff_fmt=$(printf "%+.2f" "$duration_diff")
duration_str="⏱️ ${geth_duration_fmt}s / ${zkos_duration_fmt}s / ${diff_arrow} ${duration_diff_fmt}s"
# Row
printf "| \`%-19s\` | %s %d / %d | %s |\n" \
"$test" "$zkos_status" "$zkos_passed" "$zkos_total" "$duration_str" >> report.md
# Accumulate for summary
total_tests=$((total_tests + 1))
zkos_passed_total=$((zkos_passed_total + zkos_passed))
zkos_total_total=$((zkos_total_total + zkos_total))
sum_geth=$(echo "$sum_geth + $geth_duration" | bc -l)
sum_zkos=$(echo "$sum_zkos + $zkos_duration" | bc -l)
sum_diff=$(echo "$sum_diff + $duration_diff" | bc -l)
done
# Summary row (average per test + percent)
if [[ "$total_tests" -gt 0 ]]; then
avg_geth=$(echo "$sum_geth" | bc -l)
avg_zkos=$(echo "$sum_zkos" | bc -l)
avg_diff=$(echo "$avg_geth - $avg_zkos" | bc -l)
# Percent change of averages (equivalently suite-weighted)
avg_pct=$(echo "($sum_zkos / $sum_geth - 1) * 100" | bc -l)
if (( $(echo "$avg_diff >= 0" | bc -l) )); then
avg_arrow="🔼"
pct_arrow="🔼"
else
avg_arrow="🔽"
pct_arrow="🔽"
fi
avg_geth_fmt=$(printf "%.2f" "$avg_geth")
avg_zkos_fmt=$(printf "%.2f" "$avg_zkos")
avg_diff_fmt=$(printf "%+.2f" "$avg_diff")
avg_pct_fmt=$(printf "%+.2f" "$avg_pct")
echo "|---------------------|--------------|----------------------------------|" >> report.md
printf "| **All Tests (avg per test)** | %d / %d | ⏱️ %ss / %ss / %s %ss (%s %s%%) |\n" \
"$zkos_passed_total" "$zkos_total_total" \
"$avg_geth_fmt" "$avg_zkos_fmt" "$avg_arrow" "$avg_diff_fmt" "$pct_arrow" "$avg_pct_fmt" >> report.md
fi
- name: Update job summary
run: cat report.md >> "${GITHUB_STEP_SUMMARY}"