From 7f369f085a3758b6949fd1368c2485d4ec1918c1 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Tue, 2 Dec 2025 16:32:11 +0100 Subject: [PATCH 01/87] integration: v2 migration to go --- cmd/integration/main.go | 1735 +++++++++++++++++ go.mod | 15 + go.sum | 12 + .../mainnet/debug_traceCallMany/test_02.json | 64 + .../mainnet/debug_traceCallMany/test_02.tar | Bin 10240 -> 0 bytes .../{test_04.tar => test_04.json} | Bin 1239040 -> 1234148 bytes .../{test_05.tar => test_05.json} | Bin 409600 -> 401160 bytes .../{test_06.tar => test_06.json} | Bin 174080 -> 168755 bytes .../{test_07.tar => test_07.json} | Bin 1075200 -> 1068213 bytes .../{test_08.tar => test_08.json} | Bin 1843200 -> 1839703 bytes .../{test_10.tar => test_10.json} | Bin 5396480 -> 5384242 bytes 11 files changed, 1826 insertions(+) create mode 100644 cmd/integration/main.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 integration/mainnet/debug_traceCallMany/test_02.json delete mode 100644 integration/mainnet/debug_traceCallMany/test_02.tar rename integration/mainnet/debug_traceCallMany/{test_04.tar => test_04.json} (99%) rename integration/mainnet/debug_traceCallMany/{test_05.tar => test_05.json} (97%) rename integration/mainnet/debug_traceCallMany/{test_06.tar => test_06.json} (96%) rename integration/mainnet/debug_traceCallMany/{test_07.tar => test_07.json} (99%) rename integration/mainnet/debug_traceCallMany/{test_08.tar => test_08.json} (99%) rename integration/mainnet/debug_traceCallMany/{test_10.tar => test_10.json} (99%) diff --git a/cmd/integration/main.go b/cmd/integration/main.go new file mode 100644 index 00000000..4dc44fb5 --- /dev/null +++ b/cmd/integration/main.go @@ -0,0 +1,1735 @@ +package main + +import ( + "archive/tar" + "bytes" + "compress/bzip2" + "compress/gzip" + "context" + "crypto/rand" + "encoding/hex" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "regexp" 
+ "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/golang-jwt/jwt/v5" + "github.com/gorilla/websocket" +) + +const ( + DaemonOnOtherPort = "other-daemon" + DaemonOnDefaultPort = "rpcdaemon" + None = "none" + ExternalProvider = "external-provider" + TimeInterval = 100 * time.Millisecond + MaxTime = 200 + TempDirname = "./temp_rpc_tests" +) + +var ( + apiNotCompared = []string{ + "mainnet/engine_getClientVersionV1", + "mainnet/trace_rawTransaction", + "mainnet/engine_", + } + + testsNotCompared = []string{} + testsNotComparedMessage = []string{} + testsNotComparedError = []string{} + + // testsOnLatest - add your list here + testsOnLatest = []string{ + "mainnet/debug_traceBlockByNumber/test_24.json", + "mainnet/debug_traceBlockByNumber/test_30.json", + "mainnet/debug_traceCall/test_22.json", + "mainnet/debug_traceCall/test_33.json", + "mainnet/debug_traceCall/test_34.json", + "mainnet/debug_traceCall/test_35.json", + "mainnet/debug_traceCall/test_36.json", + "mainnet/debug_traceCall/test_37.json", + "mainnet/debug_traceCall/test_38.json", + "mainnet/debug_traceCall/test_39.json", + "mainnet/debug_traceCall/test_40.json", + "mainnet/debug_traceCall/test_41.json", + "mainnet/debug_traceCall/test_42.json", + "mainnet/debug_traceCall/test_43.json", + "mainnet/debug_traceCallMany/test_11.json", + "mainnet/debug_traceCallMany/test_12.json", + "mainnet/eth_blobBaseFee", // works always on the latest block + "mainnet/eth_blockNumber", // works always on the latest block + "mainnet/eth_call/test_20.json", + "mainnet/eth_call/test_28.json", + "mainnet/eth_call/test_29.json", + "mainnet/eth_call/test_36.json", + "mainnet/eth_call/test_37.json", + "mainnet/eth_callBundle/test_09.json", + "mainnet/eth_createAccessList/test_18.json", + "mainnet/eth_createAccessList/test_19.json", + "mainnet/eth_createAccessList/test_20.json", + "mainnet/eth_createAccessList/test_22.json", + "mainnet/eth_estimateGas/test_01", + "mainnet/eth_estimateGas/test_02", + 
"mainnet/eth_estimateGas/test_03", + "mainnet/eth_estimateGas/test_04", + "mainnet/eth_estimateGas/test_05", + "mainnet/eth_estimateGas/test_06", + "mainnet/eth_estimateGas/test_07", + "mainnet/eth_estimateGas/test_08", + "mainnet/eth_estimateGas/test_09", + "mainnet/eth_estimateGas/test_10", + "mainnet/eth_estimateGas/test_11", + "mainnet/eth_estimateGas/test_12", + "mainnet/eth_estimateGas/test_21", + "mainnet/eth_estimateGas/test_22", + "mainnet/eth_estimateGas/test_23", + "mainnet/eth_estimateGas/test_27", + "mainnet/eth_feeHistory/test_07.json", + "mainnet/eth_feeHistory/test_22.json", + "mainnet/eth_gasPrice", // works always on the latest block + "mainnet/eth_getBalance/test_03.json", + "mainnet/eth_getBalance/test_26.json", + "mainnet/eth_getBalance/test_27.json", + "mainnet/eth_getBlockTransactionCountByNumber/test_03.json", + "mainnet/eth_getBlockByNumber/test_10.json", + "mainnet/eth_getBlockByNumber/test_27.json", + "mainnet/eth_getBlockReceipts/test_07.json", + "mainnet/eth_getCode/test_05.json", + "mainnet/eth_getCode/test_06.json", + "mainnet/eth_getCode/test_07.json", + "mainnet/eth_getLogs/test_21.json", + "mainnet/eth_getProof/test_01.json", + "mainnet/eth_getProof/test_02.json", + "mainnet/eth_getProof/test_03.json", + "mainnet/eth_getProof/test_04.json", + "mainnet/eth_getProof/test_05.json", + "mainnet/eth_getProof/test_06.json", + "mainnet/eth_getProof/test_07.json", + "mainnet/eth_getProof/test_08.json", + "mainnet/eth_getProof/test_09.json", + "mainnet/eth_getProof/test_10.json", + "mainnet/eth_getProof/test_11.json", + "mainnet/eth_getProof/test_12.json", + "mainnet/eth_getProof/test_13.json", + "mainnet/eth_getProof/test_14.json", + "mainnet/eth_getProof/test_15.json", + "mainnet/eth_getProof/test_16.json", + "mainnet/eth_getProof/test_17.json", + "mainnet/eth_getProof/test_18.json", + "mainnet/eth_getProof/test_19.json", + "mainnet/eth_getProof/test_20.json", + "mainnet/eth_getRawTransactionByBlockNumberAndIndex/test_11.json", + 
"mainnet/eth_getRawTransactionByBlockNumberAndIndex/test_12.json", + "mainnet/eth_getRawTransactionByBlockNumberAndIndex/test_13.json", + "mainnet/eth_getStorageAt/test_04.json", + "mainnet/eth_getStorageAt/test_07.json", + "mainnet/eth_getStorageAt/test_08.json", + "mainnet/eth_getTransactionByBlockNumberAndIndex/test_02.json", + "mainnet/eth_getTransactionByBlockNumberAndIndex/test_08.json", + "mainnet/eth_getTransactionByBlockNumberAndIndex/test_09.json", + "mainnet/eth_getTransactionCount/test_02.json", + "mainnet/eth_getTransactionCount/test_07.json", + "mainnet/eth_getTransactionCount/test_08.json", + "mainnet/eth_getUncleCountByBlockNumber/test_03.json", + "mainnet/eth_getUncleByBlockNumberAndIndex/test_02.json", + "mainnet/eth_maxPriorityFeePerGas", + "mainnet/eth_simulateV1/test_04.json", + "mainnet/eth_simulateV1/test_05.json", + "mainnet/eth_simulateV1/test_06.json", + "mainnet/eth_simulateV1/test_07.json", + "mainnet/eth_simulateV1/test_12.json", + "mainnet/eth_simulateV1/test_13.json", + "mainnet/eth_simulateV1/test_14.json", + "mainnet/eth_simulateV1/test_15.json", + "mainnet/eth_simulateV1/test_16.json", + "mainnet/eth_simulateV1/test_25.json", + "mainnet/eth_simulateV1/test_27.json", + "mainnet/erigon_blockNumber/test_4.json", + "mainnet/erigon_blockNumber/test_6.json", + "mainnet/ots_hasCode/test_10.json", + "mainnet/ots_searchTransactionsBefore/test_02.json", + "mainnet/parity_listStorageKeys", + "mainnet/trace_block/test_25.json", + "mainnet/trace_call/test_26.json", + "mainnet/trace_call/test_27.json", + "mainnet/trace_call/test_28.json", + "mainnet/trace_call/test_29.json", + "mainnet/trace_callMany/test_15.json", + "mainnet/trace_filter/test_25.json", + "mainnet/trace_replayBlockTransactions/test_36.json", + } +) + +type Config struct { + ExitOnFail bool + DaemonUnderTest string + DaemonAsReference string + LoopNumber int + VerboseLevel int + ReqTestNumber int + ForceDumpJSONs bool + ExternalProviderURL string + DaemonOnHost string + 
ServerPort int + EnginePort int + TestingAPIsWith string + TestingAPIs string + VerifyWithDaemon bool + Net string + JSONDir string + ResultsDir string + OutputDir string + ExcludeAPIList string + ExcludeTestList string + StartTest string + JWTSecret string + DisplayOnlyFail bool + TransportType string + Parallel bool + UseJSONDiff bool + WithoutCompareResults bool + WaitingTime int + DoNotCompareError bool + TestsOnLatestBlock bool + LocalServer string +} + +type TestResult struct { + Success bool + Error error + Test *TestDescriptor +} + +type TestDescriptor struct { + Name string + Number int + TransportType string + ResultChan chan TestResult +} + +type JSONRPCCommand struct { + Request interface{} `json:"request"` + Response interface{} `json:"response"` +} + +func NewConfig() *Config { + return &Config{ + ExitOnFail: true, + DaemonUnderTest: DaemonOnDefaultPort, + DaemonAsReference: None, + LoopNumber: 1, + VerboseLevel: 0, + ReqTestNumber: -1, + ForceDumpJSONs: false, + ExternalProviderURL: "", + DaemonOnHost: "localhost", + ServerPort: 0, + EnginePort: 0, + TestingAPIsWith: "", + TestingAPIs: "", + VerifyWithDaemon: false, + Net: "mainnet", + ResultsDir: "results", + JWTSecret: "", + DisplayOnlyFail: false, + TransportType: "http", + Parallel: true, + UseJSONDiff: true, + WithoutCompareResults: false, + WaitingTime: 0, + DoNotCompareError: false, + TestsOnLatestBlock: false, + } +} + +func (c *Config) parseFlags() error { + help := flag.Bool("h", false, "print help") + flag.BoolVar(help, "help", false, "print help") + + continueOnFail := flag.Bool("c", false, "continue on test failure") + flag.BoolVar(continueOnFail, "continue", false, "continue on test failure") + + daemonPort := flag.Bool("I", false, "use 51515/51516 ports to server") + flag.BoolVar(daemonPort, "daemon-port", false, "use 51515/51516 ports to server") + + externalProvider := flag.String("e", "", "verify external provider URL") + flag.StringVar(externalProvider, "verify-external-provider", 
"", "verify external provider URL") + + serial := flag.Bool("S", false, "run tests in serial") + flag.BoolVar(serial, "serial", false, "run tests in serial") + + host := flag.String("H", "localhost", "host where RpcDaemon is located") + flag.StringVar(host, "host", "localhost", "host where RpcDaemon is located") + + testOnLatest := flag.Bool("L", false, "run only tests on latest block") + flag.BoolVar(testOnLatest, "tests-on-latest-block", false, "run only tests on latest block") + + port := flag.Int("p", 0, "port where RpcDaemon is located") + flag.IntVar(port, "port", 0, "port where RpcDaemon is located") + + enginePort := flag.Int("P", 0, "engine port") + flag.IntVar(enginePort, "engine-port", 0, "engine port") + + displayOnlyFail := flag.Bool("f", false, "display only failed tests") + flag.BoolVar(displayOnlyFail, "display-only-fail", false, "display only failed tests") + + verbose := flag.Int("v", 0, "verbose level (0-2)") + flag.IntVar(verbose, "verbose", 0, "verbose level (0-2)") + + testNumber := flag.Int("t", -1, "run single test number") + flag.IntVar(testNumber, "run-test", -1, "run single test number") + + startTest := flag.String("s", "", "start from test number") + flag.StringVar(startTest, "start-from-test", "", "start from test number") + + apiListWith := flag.String("a", "", "API list with pattern") + flag.StringVar(apiListWith, "api-list-with", "", "API list with pattern") + + apiList := flag.String("A", "", "API list exact match") + flag.StringVar(apiList, "api-list", "", "API list exact match") + + loops := flag.Int("l", 1, "number of loops") + flag.IntVar(loops, "loops", 1, "number of loops") + + compareErigon := flag.Bool("d", false, "compare with Erigon RpcDaemon") + flag.BoolVar(compareErigon, "compare-erigon-rpcdaemon", false, "compare with Erigon RpcDaemon") + + jwtFile := flag.String("k", "", "JWT secret file") + flag.StringVar(jwtFile, "jwt", "", "JWT secret file") + + createJWT := flag.String("K", "", "create JWT secret file") + 
flag.StringVar(createJWT, "create-jwt", "", "create JWT secret file") + + blockchain := flag.String("b", "mainnet", "blockchain network") + flag.StringVar(blockchain, "blockchain", "mainnet", "blockchain network") + + transportType := flag.String("T", "http", "transport type") + flag.StringVar(transportType, "transport-type", "http", "transport type") + + excludeAPIList := flag.String("x", "", "exclude API list") + flag.StringVar(excludeAPIList, "exclude-api-list", "", "exclude API list") + + excludeTestList := flag.String("X", "", "exclude test list") + flag.StringVar(excludeTestList, "exclude-test-list", "", "exclude test list") + + jsonDiff := flag.Bool("j", true, "use json-diff") + flag.BoolVar(jsonDiff, "json-diff", true, "use json-diff") + + waitingTime := flag.Int("w", 0, "waiting time in milliseconds") + flag.IntVar(waitingTime, "waiting-time", 0, "waiting time in milliseconds") + + dumpResponse := flag.Bool("o", false, "dump response") + flag.BoolVar(dumpResponse, "dump-response", false, "dump response") + + withoutCompare := flag.Bool("i", false, "without compare results") + flag.BoolVar(withoutCompare, "without-compare-results", false, "without compare results") + + doNotCompareError := flag.Bool("E", false, "do not compare error") + flag.BoolVar(doNotCompareError, "do-not-compare-error", false, "do not compare error") + + flag.Parse() + + if *help { + usage() + os.Exit(0) + } + + // Validation and conflicts + if *waitingTime > 0 && c.Parallel { + return fmt.Errorf("waiting-time is not compatible with parallel tests") + } + + if *daemonPort && *compareErigon { + return fmt.Errorf("daemon-port is not compatible with compare-erigon-rpcdaemon") + } + + if *testNumber != -1 && (*excludeTestList != "" || *excludeAPIList != "") { + return fmt.Errorf("run-test is not compatible with exclude-api-list or exclude-test-list") + } + + if *apiList != "" && *excludeAPIList != "" { + return fmt.Errorf("api-list is not compatible with exclude-api-list") + } + + if 
*compareErigon && *withoutCompare { + return fmt.Errorf("compare-erigon-rpcdaemon is not compatible with without-compare-results") + } + + // Apply configuration + c.ExitOnFail = !*continueOnFail + c.VerboseLevel = *verbose + c.ReqTestNumber = *testNumber + c.LoopNumber = *loops + c.DaemonOnHost = *host + c.ServerPort = *port + c.EnginePort = *enginePort + c.DisplayOnlyFail = *displayOnlyFail + c.TestingAPIsWith = *apiListWith + c.TestingAPIs = *apiList + c.Net = *blockchain + c.ExcludeAPIList = *excludeAPIList + c.ExcludeTestList = *excludeTestList + c.StartTest = *startTest + c.TransportType = *transportType + c.UseJSONDiff = *jsonDiff + c.WaitingTime = *waitingTime + c.ForceDumpJSONs = *dumpResponse + c.WithoutCompareResults = *withoutCompare + c.DoNotCompareError = *doNotCompareError + c.TestsOnLatestBlock = *testOnLatest + c.Parallel = !*serial + + if *daemonPort { + c.DaemonUnderTest = DaemonOnOtherPort + } + + if *externalProvider != "" { + c.DaemonAsReference = ExternalProvider + c.ExternalProviderURL = *externalProvider + c.VerifyWithDaemon = true + } + + if *compareErigon { + c.VerifyWithDaemon = true + c.DaemonAsReference = DaemonOnDefaultPort + c.UseJSONDiff = true + } + + if *createJWT != "" { + if err := generateJWTSecret(*createJWT, 64); err != nil { + return fmt.Errorf("failed to create JWT secret: %v", err) + } + secret, err := getJWTSecret(*createJWT) + if err != nil { + return fmt.Errorf("failed to read JWT secret: %v", err) + } + c.JWTSecret = secret + } else if *jwtFile != "" { + secret, err := getJWTSecret(*jwtFile) + if err != nil { + return fmt.Errorf("secret file not found: %s", *jwtFile) + } + c.JWTSecret = secret + } + + // Validate transport type + if *transportType != "" { + types := strings.Split(*transportType, ",") + for _, t := range types { + if t != "websocket" && t != "http" && t != "http_comp" && t != "https" && t != "websocket_comp" { + return fmt.Errorf("invalid connection type: %s", t) + } + } + } + + c.UpdateDirs() + + // 
Remove output directory if exists + if _, err := os.Stat(c.OutputDir); err == nil { + err := os.RemoveAll(c.OutputDir) + if err != nil { + return err + } + } + + return nil +} + +func (c *Config) UpdateDirs() { + c.JSONDir = "./integration/" + c.Net + "/" + c.OutputDir = c.JSONDir + c.ResultsDir + "/" + if c.ServerPort == 0 { + c.ServerPort = 8545 + } + if c.EnginePort == 0 { + c.EnginePort = 8551 + } + c.LocalServer = "http://" + c.DaemonOnHost + ":" + strconv.Itoa(c.ServerPort) +} + +// Part 2: Utility Functions + +func usage() { + fmt.Println("Usage: integration [options]") + fmt.Println("") + fmt.Println("Launch an automated RPC test sequence on target blockchain node") + fmt.Println("") + fmt.Println("Options:") + fmt.Println(" -h, --help print this help") + fmt.Println(" -j, --json-diff use json-diff to make compare [default use json-diff]") + fmt.Println(" -f, --display-only-fail shows only failed tests (not Skipped) [default: print all]") + fmt.Println(" -E, --do-not-compare-error do not compare error") + fmt.Println(" -v, --verbose 0: no message; 1: print result; 2: print request/response [default 0]") + fmt.Println(" -c, --continue runs all tests even if one test fails [default: exit at first failed test]") + fmt.Println(" -l, --loops [default loop 1]") + fmt.Println(" -b, --blockchain [default: mainnet]") + fmt.Println(" -s, --start-from-test run tests starting from specified test number [default starts from 1]") + fmt.Println(" -t, --run-test run single test using global test number") + fmt.Println(" -d, --compare-erigon-rpcdaemon send requests also to the reference daemon e.g.: Erigon RpcDaemon") + fmt.Println(" -T, --transport-type http,http_comp,https,websocket,websocket_comp [default http]") + fmt.Println(" -k, --jwt authentication token file") + fmt.Println(" -K, --create-jwt generate authentication token file and use it") + fmt.Println(" -a, --api-list-with run all tests of the specified API that contains string") + fmt.Println(" -A, --api-list 
run all tests of the specified API that match full name") + fmt.Println(" -x, --exclude-api-list exclude API list") + fmt.Println(" -X, --exclude-test-list exclude test list") + fmt.Println(" -o, --dump-response dump JSON RPC response even if responses are the same") + fmt.Println(" -H, --host host where the RpcDaemon is located [default: localhost]") + fmt.Println(" -p, --port port where the RpcDaemon is located [default: 8545]") + fmt.Println(" -I, --daemon-port Use 51515/51516 ports to server") + fmt.Println(" -e, --verify-external-provider send any request also to external API endpoint as reference") + fmt.Println(" -i, --without-compare-results send request and waits response without compare results") + fmt.Println(" -w, --waiting-time wait time after test execution in milliseconds") + fmt.Println(" -S, --serial all tests run in serial way [default: parallel]") + fmt.Println(" -L, --tests-on-latest-block runs only test on latest block") +} + +func getTarget(targetType, method string, config *Config) string { + if targetType == ExternalProvider { + return config.ExternalProviderURL + } + + if config.VerifyWithDaemon && targetType == DaemonOnOtherPort && strings.Contains(method, "engine_") { + return config.DaemonOnHost + ":51516" + } + + if config.VerifyWithDaemon && targetType == DaemonOnOtherPort { + return config.DaemonOnHost + ":51515" + } + + if targetType == DaemonOnOtherPort && strings.Contains(method, "engine_") { + return config.DaemonOnHost + ":51516" + } + + if targetType == DaemonOnOtherPort { + return config.DaemonOnHost + ":51515" + } + + if strings.Contains(method, "engine_") { + port := config.EnginePort + if port == 0 { + port = 8551 + } + return config.DaemonOnHost + ":" + strconv.Itoa(port) + } + + port := config.ServerPort + if port == 0 { + port = 8545 + } + return config.DaemonOnHost + ":" + strconv.Itoa(port) +} + +func getJSONFilenameExt(targetType, target string) string { + parts := strings.Split(target, ":") + port := "" + if 
len(parts) > 1 { + port = parts[1] + } + + if targetType == DaemonOnOtherPort { + return "_" + port + "-daemon.json" + } + if targetType == ExternalProvider { + return "-external_provider_url.json" + } + return "_" + port + "-rpcdaemon.json" +} + +func getJWTSecret(filename string) (string, error) { + data, err := os.ReadFile(filename) + if err != nil { + return "", err + } + contents := string(data) + if len(contents) >= 2 && contents[:2] == "0x" { + return contents[2:], nil + } + return strings.TrimSpace(contents), nil +} + +func generateJWTSecret(filename string, length int) error { + if length <= 0 { + length = 64 + } + randomBytes := make([]byte, length/2) + if _, err := rand.Read(randomBytes); err != nil { + return err + } + randomHex := "0x" + hex.EncodeToString(randomBytes) + if err := os.WriteFile(filename, []byte(randomHex), 0600); err != nil { + return err + } + fmt.Printf("Secret File '%s' created with success!\n", filename) + return nil +} + +func toLowerCase(inputFile, outputFile string) error { + inputContent, err := os.ReadFile(inputFile) + if err != nil { + return err + } + + outputContent := []byte(strings.ToLower(string(inputContent))) + + err = os.WriteFile(outputFile, outputContent, 0644) + if err != nil { + return err + } + return nil +} + +func replaceMessage(inputFile, outputFile, matchedString string) error { + inData, err := os.ReadFile(inputFile) + if err != nil { + return err + } + + lines := strings.Split(string(inData), "\n") + var output []string + for _, line := range lines { + if !strings.Contains(line, matchedString) { + output = append(output, line) + } else { + output = append(output, " \"message\": \"\"") + } + } + + return os.WriteFile(outputFile, []byte(strings.Join(output, "\n")), 0644) +} + +func extractNumber(filename string) int { + re := regexp.MustCompile(`\d+`) + match := re.FindString(filename) + if match != "" { + num, _ := strconv.Atoi(match) + return num + } + return 0 +} + +func checkTestNameForNumber(testName 
string, reqTestNumber int) bool { + if reqTestNumber == -1 { + return true + } + pattern := "_0*" + strconv.Itoa(reqTestNumber) + "($|[^0-9])" + matched, _ := regexp.MatchString(pattern, testName) + return matched +} + +// Part 3: Test Logic Functions + +func isSkipped(currAPI, testName string, globalTestNumber int, config *Config) bool { + apiFullName := config.Net + "/" + currAPI + apiFullTestName := config.Net + "/" + testName + + if (config.ReqTestNumber == -1 || config.TestingAPIs != "" || config.TestingAPIsWith != "") && + !(config.ReqTestNumber != -1 && (config.TestingAPIs != "" || config.TestingAPIsWith != "")) && + config.ExcludeAPIList == "" && config.ExcludeTestList == "" { + for _, currTestName := range apiNotCompared { + if strings.Contains(apiFullName, currTestName) { + return true + } + } + for _, currTest := range testsNotCompared { + if strings.Contains(apiFullTestName, currTest) { + return true + } + } + } + + if config.ExcludeAPIList != "" { + excludeAPIs := strings.Split(config.ExcludeAPIList, ",") + for _, excludeAPI := range excludeAPIs { + if strings.Contains(apiFullName, excludeAPI) || strings.Contains(apiFullTestName, excludeAPI) { + return true + } + } + } + + if config.ExcludeTestList != "" { + excludeTests := strings.Split(config.ExcludeTestList, ",") + for _, excludeTest := range excludeTests { + if excludeTest == strconv.Itoa(globalTestNumber) { + return true + } + } + } + + return false +} + +func verifyInLatestList(testName string, config *Config) bool { + apiFullTestName := config.Net + "/" + testName + if config.TestsOnLatestBlock { + for _, currTest := range testsOnLatest { + if strings.Contains(apiFullTestName, currTest) { + return true + } + } + } + return false +} + +func apiUnderTest(currAPI, testName string, config *Config) bool { + if config.TestingAPIsWith == "" && config.TestingAPIs == "" && !config.TestsOnLatestBlock { + return true + } + + if config.TestingAPIsWith != "" { + tests := strings.Split(config.TestingAPIsWith, 
",") + for _, test := range tests { + if strings.Contains(currAPI, test) { + if config.TestsOnLatestBlock && verifyInLatestList(testName, config) { + return true + } + if config.TestsOnLatestBlock { + return false + } + return true + } + } + return false + } + + if config.TestingAPIs != "" { + tests := strings.Split(config.TestingAPIs, ",") + for _, test := range tests { + if test == currAPI { + if config.TestsOnLatestBlock && verifyInLatestList(testName, config) { + return true + } + if config.TestsOnLatestBlock { + return false + } + return true + } + } + return false + } + + if config.TestsOnLatestBlock { + return verifyInLatestList(testName, config) + } + + return false +} + +func isNotComparedMessage(testName, net string) bool { + testFullName := net + "/" + testName + for _, currTestName := range testsNotComparedMessage { + if currTestName == testFullName { + return true + } + } + return false +} + +func isNotComparedError(testName, net string) bool { + testFullName := net + "/" + testName + for _, currTestName := range testsNotComparedError { + if currTestName == testFullName { + return true + } + } + return false +} + +func getJSONFromResponse(target, msg string, verboseLevel int, result interface{}) (interface{}, error) { + if verboseLevel > 2 { + fmt.Printf("%s: [%v]\n", msg, result) + } + + if result == nil { + return nil, errors.New("failed (json response is nil, maybe server is down) on " + target) + } + + return result, nil +} + +func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response, expectedResponse interface{}) error { + if !dumpJSON { + return nil + } + + for attempt := 0; attempt < 10; attempt++ { + if err := os.MkdirAll(outputDir, 0755); err != nil { + fmt.Printf("Exception on makedirs: %s %v\n", outputDir, err) + continue + } + + if daemonFile != "" { + if _, err := os.Stat(daemonFile); err == nil { + err := os.Remove(daemonFile) + if err != nil { + return err + } + } + data, err := json.MarshalIndent(response, "", " 
") + if err != nil { + fmt.Printf("Error marshaling daemon response: %v\n", err) + continue + } + if err := os.WriteFile(daemonFile, data, 0644); err != nil { + fmt.Printf("Exception on file write daemon: %v attempt %d\n", err, attempt) + continue + } + } + + if expRspFile != "" { + if _, err := os.Stat(expRspFile); err == nil { + err := os.Remove(expRspFile) + if err != nil { + return err + } + } + data, err := json.MarshalIndent(expectedResponse, "", " ") + if err != nil { + fmt.Printf("Error marshaling expected response: %v\n", err) + continue + } + if err := os.WriteFile(expRspFile, data, 0644); err != nil { + fmt.Printf("Exception on file write expected: %v attempt %d\n", err, attempt) + continue + } + } + break + } + return nil +} + +func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, target string, verboseLevel int) (any, error) { + if transportType == "http" || transportType == "http_comp" || transportType == "https" { + headers := map[string]string{ + "Content-Type": "application/json", + } + + if transportType != "http_comp" { + headers["Accept-Encoding"] = "Identity" + } + + if jwtAuth != "" { + headers["Authorization"] = jwtAuth + } + + targetURL := target + if transportType == "https" { + targetURL = "https://" + target + } else { + targetURL = "http://" + target + } + + client := &http.Client{ + Timeout: 300 * time.Second, + } + + req, err := http.NewRequestWithContext(ctx, "POST", targetURL, bytes.NewBufferString(requestDumps)) + if err != nil { + if verboseLevel > 0 { + fmt.Printf("\nhttp request creation fail: %s %v\n", targetURL, err) + } + return nil, err + } + + for k, v := range headers { + req.Header.Set(k, v) + } + + resp, err := client.Do(req) + if err != nil { + if verboseLevel > 0 { + fmt.Printf("\nhttp connection fail: %s %v\n", targetURL, err) + } + return nil, err + } + defer func(Body io.ReadCloser) { + err := Body.Close() + if err != nil { + fmt.Printf("\nfailed to close response body: %v\n", err) + } + 
}(resp.Body) + + if resp.StatusCode != 200 { + if verboseLevel > 1 { + fmt.Printf("\npost result status_code: %d\n", resp.StatusCode) + } + return nil, err + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + if verboseLevel > 0 { + fmt.Printf("\nfailed to read response body: %v\n", err) + } + return nil, err + } + + if verboseLevel > 1 { + fmt.Printf("\npost result content: %s\n", string(body)) + } + + var result any + if err = json.Unmarshal(body, &result); err != nil { + if verboseLevel > 0 { + fmt.Printf("\nfailed to parse JSON: %v\n", err) + } + return nil, err + } + + if verboseLevel > 1 { + fmt.Printf("\ntarget: %s\n", target) + fmt.Printf("%s\n", requestDumps) + fmt.Printf("Response: %v\n", result) + } + + return result, nil + } else { + // WebSocket + wsTarget := "ws://" + target + dialer := websocket.Dialer{ + HandshakeTimeout: 300 * time.Second, + } + + headers := http.Header{} + if jwtAuth != "" { + headers.Set("Authorization", jwtAuth) + } + + conn, _, err := dialer.Dial(wsTarget, headers) + if err != nil { + if verboseLevel > 0 { + fmt.Printf("\nwebsocket connection fail: %v\n", err) + } + return nil, err + } + defer func(conn *websocket.Conn) { + err := conn.Close() + if err != nil { + fmt.Printf("\nfailed to close websocket connection: %v\n", err) + } + }(conn) + + if err = conn.WriteMessage(websocket.TextMessage, []byte(requestDumps)); err != nil { + if verboseLevel > 0 { + fmt.Printf("\nwebsocket write fail: %v\n", err) + } + return nil, err + } + + _, message, err := conn.ReadMessage() + if err != nil { + if verboseLevel > 0 { + fmt.Printf("\nwebsocket read fail: %v\n", err) + } + return nil, err + } + + var result any + if err = json.Unmarshal(message, &result); err != nil { + if verboseLevel > 0 { + fmt.Printf("\nfailed to parse JSON: %v\n", err) + } + return nil, err + } + + if verboseLevel > 1 { + fmt.Printf("\ntarget: %s\n", target) + fmt.Printf("%s\n", requestDumps) + fmt.Printf("Response: %v\n", result) + } + + return result, nil 
+ } +} + +// Part 4: Comparison and Test Execution + +func runCompare(useJSONDiff bool, errorFile, tempFile1, tempFile2, diffFile string) bool { + var cmd *exec.Cmd + alreadyFailed := false + + if useJSONDiff { + // Check if json-diff is available + checkCmd := exec.Command("json-diff", "--help") + if err := checkCmd.Run(); err != nil { + useJSONDiff = false + } + } + + if useJSONDiff { + cmd = exec.Command("sh", "-c", fmt.Sprintf("json-diff -s %s %s > %s 2> %s", tempFile2, tempFile1, diffFile, errorFile)) + alreadyFailed = false + } else { + cmd = exec.Command("sh", "-c", fmt.Sprintf("diff %s %s > %s 2> %s", tempFile2, tempFile1, diffFile, errorFile)) + alreadyFailed = true + } + + if err := cmd.Start(); err != nil { + return false + } + + done := make(chan error) + go func() { + done <- cmd.Wait() + }() + + timeout := time.After(time.Duration(MaxTime) * TimeInterval) + ticker := time.NewTicker(TimeInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + // Check if the process is still running + continue + case err := <-done: + // Process completed + if err != nil { + // Non-zero exit, which is expected for diff when files differ + } + + // Check error file size + fileInfo, err := os.Stat(errorFile) + if err == nil && fileInfo.Size() != 0 { + if alreadyFailed { + return false + } + // Try with diff instead + alreadyFailed = true + cmd = exec.Command("sh", "-c", fmt.Sprintf("diff %s %s > %s 2> %s", tempFile2, tempFile1, diffFile, errorFile)) + if err := cmd.Start(); err != nil { + return false + } + go func() { + done <- cmd.Wait() + }() + continue + } + return true + case <-timeout: + // Timeout reached, kill the process + if cmd.Process != nil { + err := cmd.Process.Kill() + if err != nil { + return false + } + } + if alreadyFailed { + return false + } + // Try with diff instead + alreadyFailed = true + cmd = exec.Command("sh", "-c", fmt.Sprintf("diff %s %s > %s 2> %s", tempFile2, tempFile1, diffFile, errorFile)) + if err := cmd.Start(); err != 
nil { + return false + } + go func() { + done <- cmd.Wait() + }() + timeout = time.After(time.Duration(MaxTime) * TimeInterval) + } + } +} + +func copyFiles(src, dst string) (int64, error) { + sourceFileStat, err := os.Stat(src) + if err != nil { + return 0, err + } + + if !sourceFileStat.Mode().IsRegular() { + return 0, fmt.Errorf("%s is not a regular file", src) + } + + source, err := os.Open(src) + if err != nil { + return 0, err + } + defer func(source *os.File) { + err := source.Close() + if err != nil { + fmt.Printf("failed to close source file: %v\n", err) + } + }(source) + + destination, err := os.Create(dst) + if err != nil { + return 0, err + } + defer func(destination *os.File) { + err := destination.Close() + if err != nil { + fmt.Printf("failed to close destination file: %v\n", err) + } + }(destination) + + nBytes, err := io.Copy(destination, source) + return nBytes, err +} + +func compareJSON(config *Config, response interface{}, jsonFile, daemonFile, expRspFile, diffFile string, testNumber int) (bool, error) { + baseName := filepath.Join(TempDirname, fmt.Sprintf("test_%d", testNumber)) + err := os.MkdirAll(baseName, 0755) + if err != nil { + return false, err + } + + tempFile1 := filepath.Join(baseName, "daemon_lower_case.txt") + tempFile2 := filepath.Join(baseName, "rpc_lower_case.txt") + errorFile := filepath.Join(baseName, "ERROR.txt") + + // Check if response contains error + responseMap, isMap := response.(map[string]interface{}) + hasError := isMap && responseMap["error"] != nil + + if hasError { + err := toLowerCase(daemonFile, tempFile1) + if err != nil { + return false, err + } + err = toLowerCase(expRspFile, tempFile2) + if err != nil { + return false, err + } + } else { + _, err := copyFiles(daemonFile, tempFile1) + if err != nil { + return false, err + } + _, err = copyFiles(expRspFile, tempFile2) + if err != nil { + return false, err + } + } + + if isNotComparedMessage(jsonFile, config.Net) { + err := replaceMessage(expRspFile, 
tempFile1, "message") + if err != nil { + return false, err + } + err = replaceMessage(daemonFile, tempFile2, "message") + if err != nil { + return false, err + } + } else if isNotComparedError(jsonFile, config.Net) { + err := replaceMessage(expRspFile, tempFile1, "error") + if err != nil { + return false, err + } + err = replaceMessage(daemonFile, tempFile2, "error") + if err != nil { + return false, err + } + } + + diffResult := runCompare(config.UseJSONDiff, errorFile, tempFile1, tempFile2, diffFile) + diffFileSize := int64(0) + + if diffResult { + fileInfo, err := os.Stat(diffFile) + if err != nil { + return false, err + } + diffFileSize = fileInfo.Size() + } + + // Cleanup temp files + err = os.Remove(tempFile1) + if err != nil { + return false, err + } + err = os.Remove(tempFile2) + if err != nil { + return false, err + } + err = os.RemoveAll(baseName) + if err != nil { + return false, err + } + + if diffFileSize != 0 || !diffResult { + if !diffResult { + err = errors.New("failed timeout") + } else { + err = errors.New("failed") + } + return false, err + } + + return true, nil +} + +func processResponse(target, target1 string, result, result1 interface{}, responseInFile interface{}, + config *Config, outputDir, daemonFile, expRspFile, diffFile, jsonFile string, testNumber int) (bool, error) { + + response, err := getJSONFromResponse(target, config.DaemonUnderTest, config.VerboseLevel, result) + if err != nil { + return false, err + } + + var expectedResponse interface{} + if result1 != nil { + expectedResponse, err = getJSONFromResponse(target1, config.DaemonAsReference, config.VerboseLevel, result1) + if err != nil { + return false, err + } + } else { + expectedResponse = responseInFile + } + + if config.WithoutCompareResults { + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + if err != nil { + return false, err + } + return true, nil + } + + if response == nil { + return false, errors.New("Failed [" + 
config.DaemonUnderTest + "] (server doesn't response)") + } + + if expectedResponse == nil { + return false, errors.New("Failed [" + config.DaemonAsReference + "] (server doesn't response)") + } + + // Deep comparison + respJSON, _ := json.Marshal(response) + expJSON, _ := json.Marshal(expectedResponse) + + if string(respJSON) != string(expJSON) { + responseMap, respIsMap := response.(map[string]interface{}) + expectedMap, expIsMap := expectedResponse.(map[string]interface{}) + + // Check various conditions where we don't care about differences + if respIsMap && expIsMap { + if responseMap["result"] != nil && expectedMap["result"] == nil && result1 == nil { + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + if err != nil { + return false, err + } + return true, nil + } + if responseMap["error"] != nil && expectedMap["error"] == nil { + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + if err != nil { + return false, err + } + return true, nil + } + if responseMap["error"] != nil && expectedMap["error"] != nil && config.DoNotCompareError { + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + if err != nil { + return false, err + } + return true, nil + } + } + + if !expIsMap { + if expMap, ok := expectedResponse.(map[string]interface{}); ok { + if expMap["error"] == nil && expMap["result"] == nil && len(expMap) == 2 { + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + if err != nil { + return false, err + } + return true, nil + } + } + } + + err := dumpJSONs(true, daemonFile, expRspFile, outputDir, response, expectedResponse) + if err != nil { + return false, err + } + + same, err := compareJSON(config, response, jsonFile, daemonFile, expRspFile, diffFile, testNumber) + if err != nil { + return same, err + } + if same { + err := os.Remove(daemonFile) 
+ if err != nil { + return false, err + } + err = os.Remove(expRspFile) + if err != nil { + return false, err + } + err = os.Remove(diffFile) + if err != nil { + return false, err + } + } + + // Try to remove the output directory if empty + if entries, err := os.ReadDir(outputDir); err == nil && len(entries) == 0 { + err := os.Remove(outputDir) + if err != nil { + return false, err + } + } + + err = dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + if err != nil { + return false, err + } + return same, nil + } + + err = dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + if err != nil { + return false, err + } + return true, nil +} + +func runTest(ctx context.Context, jsonFile string, testNumber int, transportType string, config *Config) (bool, error) { + jsonFilename := filepath.Join(config.JSONDir, jsonFile) + ext := filepath.Ext(jsonFile) + + var jsonrpcCommands []JSONRPCCommand + + if ext == ".tar" || ext == ".zip" { + file, err := os.Open(jsonFilename) + if err != nil { + return false, errors.New("cannot open archive file " + jsonFilename) + } + defer func(file *os.File) { + err := file.Close() + if err != nil { + fmt.Printf("\nfailed to close archive file: %v\n", err) + } + }(file) + + tarReader := tar.NewReader(bzip2.NewReader(file)) + _, err = tarReader.Next() + if err != nil { + tarReader = tar.NewReader(file) + _, err = tarReader.Next() + if err != nil { + return false, errors.New("bad archive file " + jsonFilename) + } + } + + buff, err := io.ReadAll(tarReader) + if err != nil { + return false, errors.New("cannot read from archive " + jsonFilename) + } + + if err := json.Unmarshal(buff, &jsonrpcCommands); err != nil { + return false, errors.New("cannot parse JSON from archive " + jsonFilename) + } + } else if ext == ".gzip" { + file, err := os.Open(jsonFilename) + if err != nil { + return false, errors.New("cannot open gzip file " + jsonFilename) + } + defer 
func(file *os.File) { + err := file.Close() + if err != nil { + fmt.Printf("\nfailed to close gzip file: %v\n", err) + } + }(file) + + gzReader, err := gzip.NewReader(file) + if err != nil { + return false, errors.New("cannot create gzip reader " + jsonFilename) + } + defer func(gzReader *gzip.Reader) { + err := gzReader.Close() + if err != nil { + fmt.Printf("\nfailed to close gzip reader: %v\n", err) + } + }(gzReader) + + buff, err := io.ReadAll(gzReader) + if err != nil { + return false, errors.New("cannot read from gzip " + jsonFilename) + } + + if err := json.Unmarshal(buff, &jsonrpcCommands); err != nil { + return false, errors.New("cannot parse JSON from gzip " + jsonFilename) + } + } else { + data, err := os.ReadFile(jsonFilename) + if err != nil { + return false, errors.New("cannot read file " + jsonFilename) + } + + if err := json.Unmarshal(data, &jsonrpcCommands); err != nil { + return false, errors.New("cannot parse JSON " + jsonFilename) + } + } + + for _, jsonRPC := range jsonrpcCommands { + request := jsonRPC.Request + method := "" + + requestBytes, _ := json.Marshal(request) + var requestMap map[string]interface{} + if err := json.Unmarshal(requestBytes, &requestMap); err == nil { + if m, ok := requestMap["method"].(string); ok { + method = m + } + } else { + // Try an array of requests + var requestArray []map[string]interface{} + if err := json.Unmarshal(requestBytes, &requestArray); err == nil && len(requestArray) > 0 { + if m, ok := requestArray[0]["method"].(string); ok { + method = m + } + } + } + + requestDumps, _ := json.Marshal(request) + target := getTarget(config.DaemonUnderTest, method, config) + target1 := "" + + var jwtAuth string + if config.JWTSecret != "" { + secretBytes, _ := hex.DecodeString(config.JWTSecret) + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "iat": time.Now().Unix(), + }) + tokenString, _ := token.SignedString(secretBytes) + jwtAuth = "Bearer " + tokenString + } + + outputAPIFilename := 
filepath.Join(config.OutputDir, strings.TrimSuffix(jsonFile, filepath.Ext(jsonFile))) + outputDirName := filepath.Dir(outputAPIFilename) + diffFile := outputAPIFilename + "-diff.json" + + if !config.VerifyWithDaemon { + result, err := executeRequest(ctx, transportType, jwtAuth, string(requestDumps), target, config.VerboseLevel) + if err != nil { + return false, err + } + var result1 any + responseInFile := jsonRPC.Response + + daemonFile := outputAPIFilename + "-response.json" + expRspFile := outputAPIFilename + "-expResponse.json" + + return processResponse(target, target1, result, result1, responseInFile, config, + outputDirName, daemonFile, expRspFile, diffFile, jsonFile, testNumber) + } else { + target = getTarget(DaemonOnDefaultPort, method, config) + result, err := executeRequest(ctx, transportType, jwtAuth, string(requestDumps), target, config.VerboseLevel) + if err != nil { + return false, err + } + target1 = getTarget(config.DaemonAsReference, method, config) + result1, err := executeRequest(ctx, transportType, jwtAuth, string(requestDumps), target1, config.VerboseLevel) + if err != nil { + return false, err + } + var responseInFile any + + daemonFile := outputAPIFilename + getJSONFilenameExt(DaemonOnDefaultPort, target) + expRspFile := outputAPIFilename + getJSONFilenameExt(config.DaemonAsReference, target1) + + return processResponse(target, target1, result, result1, responseInFile, config, + outputDirName, daemonFile, expRspFile, diffFile, jsonFile, testNumber) + } + } + + return true, nil +} + +// Part 5: Command-line Parsing and Main Function + +func mustAtoi(s string) int { + if s == "" { + return 0 + } + n, _ := strconv.Atoi(s) + return n +} + +func main() { + config := NewConfig() + if err := config.parseFlags(); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err) + usage() + os.Exit(-1) + } + + // Clean temp dirs if exists + if _, err := os.Stat(TempDirname); err == nil { + err := os.RemoveAll(TempDirname) + if err != nil { + 
os.Exit(-1) + } + } + + startTime := time.Now() + err := os.MkdirAll(config.OutputDir, 0755) + if err != nil { + os.Exit(-1) + } + + executedTests := 0 + failedTests := 0 + successTests := 0 + testsNotExecuted := 0 + + var serverEndpoints string + if config.VerifyWithDaemon { + if config.DaemonAsReference == ExternalProvider { + serverEndpoints = "both servers (rpcdaemon with " + config.ExternalProviderURL + ")" + } else { + serverEndpoints = "both servers (rpcdaemon with " + config.DaemonUnderTest + ")" + } + } else { + target := getTarget(config.DaemonUnderTest, "eth_call", config) + target1 := getTarget(config.DaemonUnderTest, "engine_", config) + serverEndpoints = target + "/" + target1 + } + + if config.Parallel { + fmt.Printf("Run tests in parallel on %s\n", serverEndpoints) + } else { + fmt.Printf("Run tests in serial on %s\n", serverEndpoints) + } + + if strings.Contains(config.TransportType, "_comp") { + fmt.Println("Run tests using compression") + } + + resultsAbsoluteDir, err := filepath.Abs(config.ResultsDir) + if err != nil { + os.Exit(-1) + } + fmt.Printf("Result directory: %s\n", resultsAbsoluteDir) + + globalTestNumber := 0 + availableTestedAPIs := 0 + testRep := 0 + + // Worker pool for parallel execution + var wg sync.WaitGroup + testsChan := make(chan *TestDescriptor, 100) + resultsChan := make(chan chan TestResult, 100) + + numWorkers := 1 + if config.Parallel { + numWorkers = 10 // Adjust based on your needs + } + + ctx, cancelCtx := context.WithCancel(context.Background()) + + // Start workers + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case test := <-testsChan: + if test == nil { + return + } + success, err := runTest(ctx, test.Name, test.Number, test.TransportType, config) + test.ResultChan <- TestResult{Success: success, Error: err, Test: test} + case <-ctx.Done(): + return + } + } + }() + } + + // Results collector + var resultsWg sync.WaitGroup + resultsWg.Add(1) + go func() { + 
defer resultsWg.Done() + for testResultCh := range resultsChan { + result := <-testResultCh + file := fmt.Sprintf("%-60s", result.Test.Name) + tt := fmt.Sprintf("%-15s", result.Test.TransportType) + fmt.Printf("%04d. %s::%s ", result.Test.Number, tt, file) + + if result.Success { + successTests++ + if config.VerboseLevel > 0 { + fmt.Println("OK ") + } else { + fmt.Print("OK \r") + } + } else { + failedTests++ + fmt.Printf("%s\n", strings.ToUpper(result.Error.Error())) + if config.ExitOnFail { + // Signal other tasks to stop and exit + cancelCtx() + return + } + } + } + }() + + defer func() { + if r := recover(); r != nil { + fmt.Println("\nTEST INTERRUPTED!") + } + }() + + for testRep = 0; testRep < config.LoopNumber; testRep++ { + if config.LoopNumber != 1 { + fmt.Printf("\nTest iteration: %d\n", testRep+1) + } + + transportTypes := strings.Split(config.TransportType, ",") + for _, transportType := range transportTypes { + testNumberInAnyLoop := 1 + + dirs, err := os.ReadDir(config.JSONDir) + if err != nil { + _, err := fmt.Fprintf(os.Stderr, "Error reading directory %s: %v\n", config.JSONDir, err) + if err != nil { + return + } + continue + } + + // Sort directories + sort.Slice(dirs, func(i, j int) bool { + return dirs[i].Name() < dirs[j].Name() + }) + + globalTestNumber = 0 + availableTestedAPIs = 0 + + for _, currAPIEntry := range dirs { + currAPI := currAPIEntry.Name() + + // Skip results folder and hidden folders + if currAPI == config.ResultsDir || strings.HasPrefix(currAPI, ".") { + continue + } + + testDir := filepath.Join(config.JSONDir, currAPI) + info, err := os.Stat(testDir) + if err != nil || !info.IsDir() { + continue + } + + availableTestedAPIs++ + + testEntries, err := os.ReadDir(testDir) + if err != nil { + continue + } + + // Sort test files by number + sort.Slice(testEntries, func(i, j int) bool { + return extractNumber(testEntries[i].Name()) < extractNumber(testEntries[j].Name()) + }) + + testNumber := 1 + for _, testEntry := range testEntries 
{ + testName := testEntry.Name() + + if !strings.HasPrefix(testName, "test_") { + continue + } + + ext := filepath.Ext(testName) + if ext != ".zip" && ext != ".gzip" && ext != ".json" && ext != ".tar" { + continue + } + + jsonTestFullName := filepath.Join(currAPI, testName) + + if apiUnderTest(currAPI, jsonTestFullName, config) { + if isSkipped(currAPI, jsonTestFullName, testNumberInAnyLoop, config) { + if config.StartTest == "" || testNumberInAnyLoop >= mustAtoi(config.StartTest) { + if !config.DisplayOnlyFail && config.ReqTestNumber == -1 { + file := fmt.Sprintf("%-60s", jsonTestFullName) + tt := fmt.Sprintf("%-15s", transportType) + fmt.Printf("%04d. %s::%s Skipped\n", testNumberInAnyLoop, tt, file) + } + testsNotExecuted++ + } + } else { + shouldRun := false + if config.TestingAPIsWith == "" && config.TestingAPIs == "" && (config.ReqTestNumber == -1 || config.ReqTestNumber == testNumberInAnyLoop) { + shouldRun = true + } else if config.TestingAPIsWith != "" && checkTestNameForNumber(testName, config.ReqTestNumber) { + shouldRun = true + } else if config.TestingAPIs != "" && checkTestNameForNumber(testName, config.ReqTestNumber) { + shouldRun = true + } + + if shouldRun && (config.StartTest == "" || testNumberInAnyLoop >= mustAtoi(config.StartTest)) { + testDesc := &TestDescriptor{ + Name: jsonTestFullName, + Number: testNumberInAnyLoop, + TransportType: transportType, + ResultChan: make(chan TestResult, 1), + } + resultsChan <- testDesc.ResultChan + testsChan <- testDesc + executedTests++ + + if config.WaitingTime > 0 { + time.Sleep(time.Duration(config.WaitingTime) * time.Millisecond) + } + } + } + } + + globalTestNumber++ + testNumberInAnyLoop++ + testNumber++ + } + } + } + + if config.ExitOnFail && failedTests > 0 { + fmt.Println("TEST ABORTED!") + break + } + } + + // Close channels and wait for completion + close(testsChan) + wg.Wait() + close(resultsChan) + resultsWg.Wait() + + // Clean temp dir + err = os.RemoveAll(TempDirname) + if err != nil { + 
os.Exit(-1) + } + + // Print results + elapsed := time.Since(startTime) + fmt.Println("\n ") + fmt.Printf("Test time-elapsed: %v\n", elapsed) + fmt.Printf("Available tests: %d\n", globalTestNumber-1) + fmt.Printf("Available tested api: %d\n", availableTestedAPIs) + fmt.Printf("Number of loop: %d\n", testRep) + fmt.Printf("Number of executed tests: %d\n", executedTests) + fmt.Printf("Number of NOT executed tests: %d\n", testsNotExecuted) + fmt.Printf("Number of success tests: %d\n", successTests) + fmt.Printf("Number of failed tests: %d\n", failedTests) + + if failedTests > 0 { + os.Exit(1) + } + os.Exit(0) +} diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..b9197852 --- /dev/null +++ b/go.mod @@ -0,0 +1,15 @@ +module github.com/erigontech/rpc-tests + +go 1.24 + +require ( + github.com/golang-jwt/jwt/v5 v5.3.0 + github.com/gorilla/websocket v1.5.3 + github.com/urfave/cli/v2 v2.27.7 +) + +require ( + github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..a50c75a6 --- /dev/null +++ b/go.sum @@ -0,0 +1,12 @@ +github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= +github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/urfave/cli/v2 v2.27.7 
h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU= +github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= diff --git a/integration/mainnet/debug_traceCallMany/test_02.json b/integration/mainnet/debug_traceCallMany/test_02.json new file mode 100644 index 00000000..9cbfe171 --- /dev/null +++ b/integration/mainnet/debug_traceCallMany/test_02.json @@ -0,0 +1,64 @@ +[ + { + "request": { + "id": 1, + "jsonrpc": "2.0", + "method": "debug_traceCallMany", + "params": [ + [ + { + "blockOverride": { + "blockNumber": "0x4366AC" + }, + "transactions": [ + { + "data": "0x47872b42bad3a36c7b8993d43fce8aa97c5d14a1828f559206552cd1e2e5167dbf7fab1c000000000000000000000000000000000000000000000000002386f26fc10000f3b7eaf85911f23fec4384f41e4e1432194fb7ae66ddef71f291412f7195713b", + "from": "0xB063F38343a46a8A9fFD52a47b26133b7c49788B", + "gas": "0x30D40", + "gasPrice": "0x4", + "value": "0x0" + } + ] + } + ], + { + "blockNumber": "0x4366AC", + "transactionIndex": -1 + }, + { + "disableMemory": false, + "disableStack": false, + "disableStorage": false + } + ] + }, + "response": { + "id": 1, + "jsonrpc": "2.0", + "result": [ + [ + { + "failed": true, + "gas": 200000, + "returnValue": "0x", + "structLogs": [ + { + "depth": 1, + "gas": 141928, + "gasCost": 0, + "memory": [], + "op": "SELFBALANCE", + "pc": 0, + "stack": [] + } + ] + } + ] + ] + }, + "test": { + "description": "", + "reference": "" + } + } +] \ No newline at end of file diff --git a/integration/mainnet/debug_traceCallMany/test_02.tar b/integration/mainnet/debug_traceCallMany/test_02.tar deleted file mode 100644 index daad273504dd8283107c2357e7d0ce2ce34fd6c9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10240 
zcmeH~T~FIE6oz}vuc&;zmG~o0GHKFE0c{d6B&6L`2sw^jGLoiqQdBhY-)E1x|flAD{E|9H)$qN5`M8+3K@kkx+w!3}-zCJ+tFrQrV1T;6MjKG#IW95>3PD zGB23Y$2XSW%17i6(;`mAN9ehjIxhCyk&E>c^pD;OgSVV#A@btRL8I?^Ha`>x)6|rf zJ+7-v_qXFzvoR4!$N;)}#uSmV{}D#+VEA8pvg-dlPInUco&8Yo4(xY&G}Pl~`PV!F zC4MY*O@I%z#nKQ*dJVpNRhEmUY}A5l3qT_WG_0v8?lZFuL?T?y?h3`Yn6Mz{ zc)=7)b1hznWZYMyy<#kzpetQ9gD}bX!`DZlR2&KIvR`VptmRkBG!)7N>#PW-bUXoD zxmM22{JfoL-2`-!^PKS_&gR|O)YZEKGfEJ#g1toL`5p=h3R#31rrZntAixp9k_(@) zz~gQN34=cJrRxUBp{|Q~1O*Z$!#9`x`uk)y$fmB(#&icfm_#FF~U`8nO z$ALUQbrB<8h$zH56ix!q_fPimI%DQ5(O)ojPD!&|tF!Af&^xYG%uNxQy4dHCN0uyK z^iF>XnNyy73+%Sj($?R$3$eSTdTYGxz(iKB;xF?^taSeGVJoPPKIl0oU zdeY7$OLEb3_KWtWVEkcUX{OlBKrj8aI8=y#JQKW#>1gX1pF_&S_L0E8v9t5XV zq*+vf3hZ=YZ}NK_&p-Z>@#=lL{G3+tI3C|Uq>KA3eahy{$$-sZTb|V>CS|i&&lzwK zoU>@*q#T;GvXi%>Oy}00#(nA9zss@q7fFk-LfYMq>r5M+sxHLko?*SXqEOOVH zlz(#%6tldQ!B6YEFrxmu?E9~GhcwX=5AYCsc!bAzf~VNWGaTSK4)Fpn@d~f;=3}(_ z>bmCOPd}P}9r*iaP8|b(O2fYn{QWbhj)6a=;a>;-{+Uz9z@O6auLFPo%&BA7{0%MU G_xc+Z1y4)> diff --git a/integration/mainnet/debug_traceCallMany/test_05.tar b/integration/mainnet/debug_traceCallMany/test_05.json similarity index 97% rename from integration/mainnet/debug_traceCallMany/test_05.tar rename to integration/mainnet/debug_traceCallMany/test_05.json index f619289c1a60d672effecd2ea0755c689b45b0b6..f8b632df32a3036f28c88e855d70f4a521284c52 100644 GIT binary patch delta 28 jcmZo@knGqe(Ztrq#@NQj)W*i##>Udd#=4D-jYA#)b+-nz delta 8529 zcmeIy%}#?r6b0b%H^%q?x{@cLcV?KujfqQ_zJVc;MNMdvf=PFla^nwvUa#EXZgZ2OpJFZo8za?tKj-+b1)Yc?5fcIS#T8EsUw zon<;WXx*N?l~uNB`|&zg?)vw6^!?^8Dyn6X-|N&_PtK+;b}4x>=5prA9mM))UgOyC z2ebYvFRN#A@oTwWjZOGB_qx69eZSS)e_FfEiRvMcPIQ`RQrc0Xtv5;k9x&EA70tIb z(SLXkl*?k&;HUdjh_HXkbAR>SD;%f^N8vb}gwt^Laei^T@3~1o{y6+u{Q1qXQT$PZ zKZ`%VIW~$vYVc?A=Qqbj@kb5*EdKoF*eL#}!JoyS-y9poA2s;1`16}%qxho+e-?j! 
bb8HlU)ZowJ&u@;6;*T2q`M>+e)5yO7n7C8= diff --git a/integration/mainnet/debug_traceCallMany/test_06.tar b/integration/mainnet/debug_traceCallMany/test_06.json similarity index 96% rename from integration/mainnet/debug_traceCallMany/test_06.tar rename to integration/mainnet/debug_traceCallMany/test_06.json index d9ad22d9bffad8c4102b76cead8369b4c1b04009..52f6803f837f70aa780eb073a178645ee2f10dad 100644 GIT binary patch delta 16 XcmZp;z_oc9R})(s8{;-MCgaHfG-w4^ delta 5381 zcmeH{Kdyp65XRS65)Z)%VD>LICYF}oz>r9x3BE)yvBSa}cn2?_2k}S(hGYuEi+S!~ zG28I%E<4}M4`#2I;CK(JD30RUB0bErbd~LdmjTZqUOcPknl{~LGZ!F1NhQLAmvnH@ zs-EnNGTqhwc%4c&{zHz|UroZiT<6(TCRRJJI#!u22yIe>oL3$Y^%0HtdnPiqlR@r2i I;~}^D0mgq0A^-pY delta 7084 zcmeH{OHRW;42Fkip_IoFa)Q*EWF}#OM0YG$^adyrSrjQCHL28Hy6`2q2fYVJVv;J7 z7bvKLxRvm1^3Nmw?Jx1H-zTfjN#&3I@%=PiK4j^0Hdnn2*bMc}v)ZhMQM*~s6$GFI zt-NI~>Cm8+n=FblojZTJ&ZTSrU5>_ICthC8^6V~%O=!V{vGL7qVnHXvo|D^)`g^Xi zEBxNnUuH%51hM~8yuEfM{F}YHx2k-bKm?K1)~LWh6oghs%{`z)ch2jt zH7Wo09w=scCxf5%w?Rbxw|Vcc&W>oH6&~Or9^o;b;3*F949{_h7dXO8yu#~`oAu+a z^A&&k!T7cC=VwkGg+HaiuZ2H9bLuGkDGh!t{P~$vN8wLt@N41E&zw36e@cU23x9s* d)KU0T8vI)L^E0Q8!k^OM*Z-aWf*-ka`~s;QRf7Nk diff --git a/integration/mainnet/debug_traceCallMany/test_08.tar b/integration/mainnet/debug_traceCallMany/test_08.json similarity index 99% rename from integration/mainnet/debug_traceCallMany/test_08.tar rename to integration/mainnet/debug_traceCallMany/test_08.json index d920a9a65bc382fc8c1fafd0415b916d20b4fd4b..865569117cb5f0eea01c3c797a5bc65a2b3ce99f 100644 GIT binary patch delta 94 zcmWN_#}R@6006-uog@*EpkQCLK?x3ztArYO+lB4GKKJ`u|NFWjfrJuCEQwrmlTbkfTpqrBuTlgtO7r9A;+trepH delta 3591 zcmcb7p1wmsVueR)VoGWe15JPd1TbKs>mg#sCNN15pTWS;z{t>uL7{>s z&OtV`9>^&zE=eo`+K((wFE+^jfdpXtjVu&$OEPm)Z4J#$jm->9jm=H<%uIkRV`DBF zJ8n?;pT>T~XZ)b?e@SX_NxXrDURH5_9zF+CA_vObW@g~54oqplY;Iss4`UdZ8yFfI z8ksW~ni`lG8k-rJfXe|RV-rIL1=?jzY(0fA(nj(?86SZ5-+0Z-E>bq_6>Dp-%1q diff --git a/integration/mainnet/debug_traceCallMany/test_10.tar b/integration/mainnet/debug_traceCallMany/test_10.json 
similarity index 99% rename from integration/mainnet/debug_traceCallMany/test_10.tar rename to integration/mainnet/debug_traceCallMany/test_10.json index 45c6230406b26f482753462109dc1d3691034924..ea65465b969bf5e559a872d6b3efc196dd80ad99 100644 GIT binary patch delta 258 zcmWN_$u@!k0D#fsSD#tQkV>g&Fs3pOkrX22i8>bF^K@Ex2JJi&cXS(k()B_$h@mW*slR&tV;E!mcW z6s06(*^yn@lYKdmL#fD-RHY_$IhGSSl`}b)3%Qglxt1Gg$gSK-Q|_fD57L&7Jj#8BqA237xXXweE_6R^go@t++UwZoW=l9zBmmjrW*a#bQOU<>#R`Ww^HK^op8%OYm z-`>VK@B5SGIB=1RqbR7aRq{CUroCeDxzlZ~7X9+PRxbOu^Z4B#=k<1Xx!szH^3-Q8 z^?4fRe!55_RX^K~Sbx=v9B}^~v;J`ILr}_a*X4g;3Q+5;xuPC%Q?<-fs2e&=MocK<_cH2#&ssS z!A)*)n<+vXI3i*^2`L#l)7;@M_qfjt513_+c^>kJ$2{RF3oP=C=e*!0uXxQHmU#Q^ zT|BwnX4WrX8vShk`sT9H{H2C|Hh+C{*=YV!LqD6pzPW5Pf2pCL&0pVKHk!ZG(9hk^t1Wvo6AP?mm2!n{PoRcqxnk>{cQgF=CaZJrG|bse|>Y= tX#P?|Kbyb4xok9lsiB|EU*B9dn!nW0&*rahE*s5XYUt Date: Thu, 4 Dec 2025 17:52:28 +0100 Subject: [PATCH 02/87] integration: v2 migration to go --- cmd/integration/main.go | 71 ++++++++++++++++++++++++++++++++++------- go.mod | 14 ++++++++ go.sum | 58 +++++++++++++++++++++++++++++++++ 3 files changed, 131 insertions(+), 12 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 4dc44fb5..a37b6b7b 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -26,6 +26,7 @@ import ( "github.com/golang-jwt/jwt/v5" "github.com/gorilla/websocket" + "github.com/josephburnett/jd/v2" ) const ( @@ -171,6 +172,16 @@ var ( } ) +type JsonDiffKind int + +const ( + JdLibrary JsonDiffKind = iota + JsonDiffTool + DiffTool +) + +var jsonDiffKind = JsonDiffTool + type Config struct { ExitOnFail bool DaemonUnderTest string @@ -461,9 +472,9 @@ func (c *Config) UpdateDirs() { // Part 2: Utility Functions func usage() { - fmt.Println("Usage: integration [options]") + fmt.Println("Usage: rpc_int [options]") fmt.Println("") - fmt.Println("Launch an automated RPC test sequence on target blockchain node") + fmt.Println("Launch an automated sequence of RPC integration tests on 
target blockchain node(s)") fmt.Println("") fmt.Println("Options:") fmt.Println(" -h, --help print this help") @@ -955,6 +966,42 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t // Part 4: Comparison and Test Execution +func compareJSONFiles(errorFileName, fileName1, fileName2, diffFileName string) (bool, error) { + switch jsonDiffKind { + case JdLibrary: + jsonNode1, err := jd.ReadJsonFile(fileName1) + if err != nil { + return false, err + } + jsonNode2, err := jd.ReadJsonFile(fileName2) + if err != nil { + return false, err + } + diff := jsonNode1.Diff(jsonNode2) + diffString := diff.Render() + if diffString == "" { + return false, nil + } + err = os.WriteFile(diffFileName, []byte(diffString), 0644) + if err != nil { + return false, err + } + return true, nil + case JsonDiffTool: + if failed := runCompare(true, errorFileName, fileName1, fileName2, diffFileName); failed { + return false, fmt.Errorf("failed to compare %s and %s using json-diff command", fileName1, fileName2) + } + return true, nil + case DiffTool: + if failed := runCompare(false, errorFileName, fileName1, fileName2, diffFileName); failed { + return false, fmt.Errorf("failed to compare %s and %s using diff command", fileName1, fileName2) + } + return true, nil + default: + return false, fmt.Errorf("unknown JSON diff kind: %d", jsonDiffKind) + } +} + func runCompare(useJSONDiff bool, errorFile, tempFile1, tempFile2, diffFile string) bool { var cmd *exec.Cmd alreadyFailed := false @@ -1042,7 +1089,7 @@ func runCompare(useJSONDiff bool, errorFile, tempFile1, tempFile2, diffFile stri } } -func copyFiles(src, dst string) (int64, error) { +func copyFile(src, dst string) (int64, error) { sourceFileStat, err := os.Stat(src) if err != nil { return 0, err @@ -1103,11 +1150,11 @@ func compareJSON(config *Config, response interface{}, jsonFile, daemonFile, exp return false, err } } else { - _, err := copyFiles(daemonFile, tempFile1) + _, err := copyFile(daemonFile, tempFile1) 
if err != nil { return false, err } - _, err = copyFiles(expRspFile, tempFile2) + _, err = copyFile(expRspFile, tempFile2) if err != nil { return false, err } @@ -1133,7 +1180,7 @@ func compareJSON(config *Config, response interface{}, jsonFile, daemonFile, exp } } - diffResult := runCompare(config.UseJSONDiff, errorFile, tempFile1, tempFile2, diffFile) + diffResult, err := compareJSONFiles(errorFile, tempFile1, tempFile2, diffFile) diffFileSize := int64(0) if diffResult { @@ -1197,11 +1244,11 @@ func processResponse(target, target1 string, result, result1 interface{}, respon } if response == nil { - return false, errors.New("Failed [" + config.DaemonUnderTest + "] (server doesn't response)") + return false, errors.New("failed [" + config.DaemonUnderTest + "] (server doesn't respond)") } if expectedResponse == nil { - return false, errors.New("Failed [" + config.DaemonAsReference + "] (server doesn't response)") + return false, errors.New("failed [" + config.DaemonAsReference + "] (server doesn't respond)") } // Deep comparison @@ -1564,13 +1611,13 @@ func main() { if result.Success { successTests++ if config.VerboseLevel > 0 { - fmt.Println("OK ") + fmt.Println("OK") } else { - fmt.Print("OK \r") + fmt.Print("OK\r") } } else { failedTests++ - fmt.Printf("%s\n", strings.ToUpper(result.Error.Error())) + fmt.Printf("%s\n", result.Error.Error()) if config.ExitOnFail { // Signal other tasks to stop and exit cancelCtx() @@ -1659,7 +1706,7 @@ func main() { if !config.DisplayOnlyFail && config.ReqTestNumber == -1 { file := fmt.Sprintf("%-60s", jsonTestFullName) tt := fmt.Sprintf("%-15s", transportType) - fmt.Printf("%04d. %s::%s Skipped\n", testNumberInAnyLoop, tt, file) + fmt.Printf("%04d. 
%s::%s skipped\n", testNumberInAnyLoop, tt, file) } testsNotExecuted++ } diff --git a/go.mod b/go.mod index b9197852..9919840d 100644 --- a/go.mod +++ b/go.mod @@ -5,11 +5,25 @@ go 1.24 require ( github.com/golang-jwt/jwt/v5 v5.3.0 github.com/gorilla/websocket v1.5.3 + github.com/josephburnett/jd/v2 v2.3.0 + github.com/tsenart/vegeta/v12 v12.13.0 github.com/urfave/cli/v2 v2.27.7 ) require ( github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/influxdata/tdigest v0.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect + golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/text v0.28.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index a50c75a6..fae29cda 100644 --- a/go.sum +++ b/go.sum @@ -1,12 +1,70 @@ +github.com/bmizerany/perks v0.0.0-20230307044200-03f9df79da1e h1:mWOqoK5jV13ChKf/aF3plwQ96laasTJgZi4f1aSOu+M= +github.com/bmizerany/perks v0.0.0-20230307044200-03f9df79da1e/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q= github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654 h1:XOPLOMn/zT4jIgxfxSsoXPxkrzz0FaCHwp33x5POJ+Q= +github.com/dgryski/go-gk 
v0.0.0-20200319235926-a69029f61654/go.mod h1:qm+vckxRlDt0aOla0RYJJVeqHZlWfOm2UIxHaqPB46E= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/influxdata/tdigest v0.0.1 h1:XpFptwYmnEKUqmkcDjrzffswZ3nvNeevbUSLPP/ZzIY= +github.com/influxdata/tdigest v0.0.1/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= +github.com/josephburnett/jd/v2 v2.3.0 h1:AyNT0zSStJ2j28zutWDO4fkc95JoICryWQRmDTRzPTQ= +github.com/josephburnett/jd/v2 v2.3.0/go.mod h1:0I5+gbo7y8diuajJjm79AF44eqTheSJy1K7DSbIUFAQ= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= 
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 h1:18kd+8ZUlt/ARXhljq+14TwAoKa61q6dX8jtwOf6DH8= +github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/streadway/quantile v0.0.0-20220407130108-4246515d968d h1:X4+kt6zM/OVO6gbJdAfJR60MGPsqCzbtXNnjoGqdfAs= +github.com/streadway/quantile v0.0.0-20220407130108-4246515d968d/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tsenart/vegeta/v12 v12.13.0 h1:J/UiNS3f69MkL0tsRLVUUV8uXXQZxdRUchtS+GYiSFc= +github.com/tsenart/vegeta/v12 v12.13.0/go.mod h1:gpdfR++WHV9/RZh4oux0f6lNPhsOH8pCjIGUlcPQe1M= github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU= github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp 
v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca h1:PupagGYwj8+I4ubCxcmcBRk3VlUWtTg5huQpZR9flmE= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= +pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= From 446e8b7d673c8344fac1c3e5fc5d069dcdc5e32c Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Thu, 4 Dec 2025 18:15:08 +0100 
Subject: [PATCH 03/87] integration: v2 migration to go --- cmd/integration/main.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index a37b6b7b..1f9317d3 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1629,7 +1629,7 @@ func main() { defer func() { if r := recover(); r != nil { - fmt.Println("\nTEST INTERRUPTED!") + fmt.Println("\nCRITICAL: TEST SEQUENCE INTERRUPTED!") } }() @@ -1746,11 +1746,15 @@ func main() { } if config.ExitOnFail && failedTests > 0 { - fmt.Println("TEST ABORTED!") + fmt.Println("WARN: test sequence interrupted by failure (ExitOnFail)") break } } + if executedTests == 0 && config.TestingAPIsWith != "" { + fmt.Printf("WARN: API filter %s selected no tests\n", config.TestingAPIsWith) + } + // Close channels and wait for completion close(testsChan) wg.Wait() From b1dac4213278b8b9e23af587153311780cfe863d Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Sat, 6 Dec 2025 14:16:37 +0100 Subject: [PATCH 04/87] integration: v2 migration to go --- cmd/integration/main.go | 439 ++++++++++++++++++++++++++++++++-------- go.mod | 1 + go.sum | 6 + 3 files changed, 356 insertions(+), 90 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 1f9317d3..c3f6194f 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -24,6 +24,7 @@ import ( "sync" "time" + bzip2w "github.com/dsnet/compress/bzip2" "github.com/golang-jwt/jwt/v5" "github.com/gorilla/websocket" "github.com/josephburnett/jd/v2" @@ -172,6 +173,301 @@ var ( } ) +// Supported compression types +const ( + GzipCompression = ".gz" + Bzip2Compression = ".bz2" + NoCompression = "" +) + +// --- Helper Functions --- + +// getCompressionType determines the compression from the filename extension. 
+func getCompressionType(filename string) string { + if strings.HasSuffix(filename, ".tar.gz") || strings.HasSuffix(filename, ".tgz") { + return GzipCompression + } + if strings.HasSuffix(filename, ".tar.bz2") || strings.HasSuffix(filename, ".tbz") { + return Bzip2Compression + } + return NoCompression +} + +// --- Archiving Logic --- + +// createArchive creates a compressed or uncompressed tar archive. +func createArchive(archivePath string, files []string) error { + // Create the output file + outFile, err := os.Create(archivePath) + if err != nil { + return fmt.Errorf("failed to create output file: %w", err) + } + defer func(outFile *os.File) { + err := outFile.Close() + if err != nil { + fmt.Printf("Failed to close output file: %v\n", err) + } + }(outFile) + + // Wrap the output file with the correct compression writer (if any) + var writer io.WriteCloser = outFile + compressionType := getCompressionType(archivePath) + + switch compressionType { + case GzipCompression: + writer = gzip.NewWriter(outFile) + case Bzip2Compression: + config := &bzip2w.WriterConfig{Level: bzip2w.BestCompression} + writer, err = bzip2w.NewWriter(outFile, config) + if err != nil { + return fmt.Errorf("failed to create bzip2 writer: %w", err) + } + } + + // Create the tar writer + tarWriter := tar.NewWriter(writer) + defer func(writer io.WriteCloser, tarWriter *tar.Writer) { + // Explicitly close the compression writer if it was used (before closing the tar writer) + if compressionType != NoCompression { + if err := writer.Close(); err != nil { + fmt.Printf("failed to close compression writer: %v\n", err) + } + } + + err := tarWriter.Close() + if err != nil { + fmt.Printf("Failed to close tar writer: %v\n", err) + } + }(writer, tarWriter) + + // Add files to the archive + for _, file := range files { + err := addFileToTar(tarWriter, file, "") + if err != nil { + return fmt.Errorf("failed to add file %s: %w", file, err) + } + } + + return nil +} + +// addFileToTar recursively adds a 
file or directory to the tar archive. +func addFileToTar(tarWriter *tar.Writer, filePath, baseDir string) error { + fileInfo, err := os.Stat(filePath) + if err != nil { + return err + } + + // Determine the name inside the archive (relative path) + var link string + if fileInfo.Mode()&os.ModeSymlink != 0 { + link, err = os.Readlink(filePath) + if err != nil { + return err + } + } + + // If baseDir is not empty, use the relative path, otherwise use the basename + nameInArchive := filePath + if baseDir != "" && strings.HasPrefix(filePath, baseDir) { + nameInArchive = filePath[len(baseDir)+1:] + } else { + nameInArchive = filepath.Base(filePath) + } + + // Create the tar Header + header, err := tar.FileInfoHeader(fileInfo, link) + if err != nil { + return err + } + header.Name = nameInArchive + + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + + // Write file contents if it's a regular file + if fileInfo.Mode().IsRegular() { + file, err := os.Open(filePath) + if err != nil { + return err + } + if _, err := io.Copy(tarWriter, file); err != nil { + _ = file.Close() + return err + } + _ = file.Close() + } + + // Recurse into directories + if fileInfo.IsDir() { + dirEntries, err := os.ReadDir(filePath) + if err != nil { + return err + } + for _, entry := range dirEntries { + fullPath := filepath.Join(filePath, entry.Name()) + // Keep the original baseDir if it was set, otherwise set it to the current path's parent + newBaseDir := baseDir + if baseDir == "" { + // Special handling for the root call: use the current path as the new base. + // This ensures nested files have relative paths within the archive. 
+ newBaseDir = filePath + } + if err := addFileToTar(tarWriter, fullPath, newBaseDir); err != nil { + return err + } + } + } + + return nil +} + +func reopenFile(filePath string, file *os.File) (*os.File, error) { + err := file.Close() + if err != nil && !errors.Is(err, os.ErrClosed) { + return nil, err + } + file, err = os.Open(filePath) + if err != nil { + return nil, err + } + return file, nil +} + +func autodetectCompression(archivePath string, inFile *os.File) (string, error) { + // Assume we have no compression and try to detect it if the tar header is invalid + compressionType := NoCompression + tarReader := tar.NewReader(inFile) + _, err := tarReader.Next() + if err != nil && !errors.Is(err, io.EOF) { + // Reopen the file and check if it's gzip encoded + inFile, err = reopenFile(archivePath, inFile) + if err != nil { + return compressionType, err + } + _, err = gzip.NewReader(inFile) + if err == nil { + compressionType = GzipCompression + } else { + // Reopen the file and check if it's bzip2 encoded + inFile, err = reopenFile(archivePath, inFile) + if err != nil { + return compressionType, err + } + _, err = tar.NewReader(bzip2.NewReader(inFile)).Next() + if err == nil { + compressionType = Bzip2Compression + } + } + err = inFile.Close() + if err != nil { + return compressionType, err + } + } + return compressionType, nil +} + +// extractArchive extracts a compressed or uncompressed tar archive. 
+func extractArchive(archivePath string, sanitizeExtension bool) ([]string, error) { + // Open the archive file + inputFile, err := os.Open(archivePath) + if err != nil { + return nil, fmt.Errorf("failed to open archive: %w", err) + } + defer func(inFile *os.File) { + _ = inFile.Close() + }(inputFile) + + // Wrap the input file with the correct compression reader + compressionType := getCompressionType(archivePath) + if compressionType == NoCompression { + // Possibly handle the corner case where the file is compressed but has tar extension + compressionType, err = autodetectCompression(archivePath, inputFile) + if err != nil { + return nil, fmt.Errorf("failed to autodetect compression for archive: %w", err) + } + if compressionType != NoCompression { + // If any compression was detected, optionally rename and reopen the archive file + if sanitizeExtension { + err = os.Rename(archivePath, archivePath+compressionType) + if err != nil { + return nil, err + } + archivePath = archivePath + compressionType + } + inputFile, err = os.Open(archivePath) + if err != nil { + return nil, err + } + } + } + + var reader io.Reader + switch compressionType { + case GzipCompression: + if reader, err = gzip.NewReader(inputFile); err != nil { + return nil, fmt.Errorf("failed to create gzip reader: %w", err) + } + case Bzip2Compression: + reader = bzip2.NewReader(inputFile) + case NoCompression: + reader = inputFile + } + + // Iterate over files in the archive and extract them + tarReader := tar.NewReader(reader) + tmpFilePaths := []string{} + for { + header, err := tarReader.Next() + if err == io.EOF { + break // End of archive + } + if err != nil { + return nil, fmt.Errorf("failed to read tar header: %w", err) + } + + targetPath := filepath.Dir(archivePath) + "/" + header.Name + + switch header.Typeflag { + case tar.TypeDir: + // Create directory + if err = os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil { + return nil, fmt.Errorf("failed to create directory %s: %w", 
targetPath, err) + } + case tar.TypeReg: + // Ensure the parent directory exists before creating the file + if err = os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil { + return nil, fmt.Errorf("failed to create parent directory for %s: %w", targetPath, err) + } + + // Create the file + outputFile, err := os.OpenFile(targetPath, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) + if err != nil { + return nil, fmt.Errorf("failed to create file %s: %w", targetPath, err) + } + + // Write content + if _, err = io.Copy(outputFile, tarReader); err != nil { + err = outputFile.Close() + if err != nil { + return nil, err + } + return nil, fmt.Errorf("failed to write file content for %s: %w", targetPath, err) + } + tmpFilePaths = append(tmpFilePaths, targetPath) + err = outputFile.Close() + if err != nil { + return nil, err + } + default: + fmt.Printf("WARN: skipping unsupported file type %c: %s\n", header.Typeflag, targetPath) + } + } + + return tmpFilePaths, nil +} + type JsonDiffKind int const ( @@ -180,7 +476,7 @@ const ( DiffTool ) -var jsonDiffKind = JsonDiffTool +var jsonDiffKind = JdLibrary type Config struct { ExitOnFail bool @@ -214,6 +510,7 @@ type Config struct { DoNotCompareError bool TestsOnLatestBlock bool LocalServer string + SanitizeArchiveExt bool } type TestResult struct { @@ -261,6 +558,7 @@ func NewConfig() *Config { WaitingTime: 0, DoNotCompareError: false, TestsOnLatestBlock: false, + SanitizeArchiveExt: false, } } @@ -469,8 +767,6 @@ func (c *Config) UpdateDirs() { c.LocalServer = "http://" + c.DaemonOnHost + ":" + strconv.Itoa(c.ServerPort) } -// Part 2: Utility Functions - func usage() { fmt.Println("Usage: rpc_int [options]") fmt.Println("") @@ -639,8 +935,6 @@ func checkTestNameForNumber(testName string, reqTestNumber int) bool { return matched } -// Part 3: Test Logic Functions - func isSkipped(currAPI, testName string, globalTestNumber int, config *Config) bool { apiFullName := config.Net + "/" + currAPI apiFullTestName := config.Net + "/" 
+ testName @@ -964,8 +1258,6 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t } } -// Part 4: Comparison and Test Execution - func compareJSONFiles(errorFileName, fileName1, fileName2, diffFileName string) (bool, error) { switch jsonDiffKind { case JdLibrary: @@ -977,23 +1269,20 @@ func compareJSONFiles(errorFileName, fileName1, fileName2, diffFileName string) if err != nil { return false, err } - diff := jsonNode1.Diff(jsonNode2) + diff := jsonNode1.Diff(jsonNode2, jd.SET) diffString := diff.Render() - if diffString == "" { - return false, nil - } err = os.WriteFile(diffFileName, []byte(diffString), 0644) if err != nil { return false, err } return true, nil case JsonDiffTool: - if failed := runCompare(true, errorFileName, fileName1, fileName2, diffFileName); failed { + if success := runCompare(true, errorFileName, fileName1, fileName2, diffFileName); !success { return false, fmt.Errorf("failed to compare %s and %s using json-diff command", fileName1, fileName2) } return true, nil case DiffTool: - if failed := runCompare(false, errorFileName, fileName1, fileName2, diffFileName); failed { + if success := runCompare(false, errorFileName, fileName1, fileName2, diffFileName); !success { return false, fmt.Errorf("failed to compare %s and %s using diff command", fileName1, fileName2) } return true, nil @@ -1261,38 +1550,38 @@ func processResponse(target, target1 string, result, result1 interface{}, respon // Check various conditions where we don't care about differences if respIsMap && expIsMap { - if responseMap["result"] != nil && expectedMap["result"] == nil && result1 == nil { + _, responseHasResult := responseMap["result"] + expectedResult, expectedHasResult := expectedMap["result"] + _, responseHasError := responseMap["error"] + expectedError, expectedHasError := expectedMap["error"] + if responseHasResult && expectedHasResult && expectedResult == nil && result1 == nil { err := dumpJSONs(config.ForceDumpJSONs, daemonFile, 
expRspFile, outputDir, response, expectedResponse) if err != nil { return false, err } return true, nil } - if responseMap["error"] != nil && expectedMap["error"] == nil { + if responseHasError && expectedHasError && expectedError == nil { err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { return false, err } return true, nil } - if responseMap["error"] != nil && expectedMap["error"] != nil && config.DoNotCompareError { + // TODO: improve len(expectedMap) == 2 which means: just "jsonrpc" and "id" are expected + if !expectedHasResult && !expectedHasError && len(expectedMap) == 2 { err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { return false, err } return true, nil } - } - - if !expIsMap { - if expMap, ok := expectedResponse.(map[string]interface{}); ok { - if expMap["error"] == nil && expMap["result"] == nil && len(expMap) == 2 { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) - if err != nil { - return false, err - } - return true, nil + if responseHasError && expectedHasError && config.DoNotCompareError { + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + if err != nil { + return false, err } + return true, nil } } @@ -1342,81 +1631,53 @@ func processResponse(target, target1 string, result, result1 interface{}, respon return true, nil } -func runTest(ctx context.Context, jsonFile string, testNumber int, transportType string, config *Config) (bool, error) { - jsonFilename := filepath.Join(config.JSONDir, jsonFile) - ext := filepath.Ext(jsonFile) +func isArchive(jsonFilename string) bool { + // Treat all files except .json as potential archive files + return !strings.HasSuffix(jsonFilename, ".json") +} +func extractJsonCommands(jsonFilename string) ([]JSONRPCCommand, error) { var jsonrpcCommands []JSONRPCCommand + 
data, err := os.ReadFile(jsonFilename) + if err != nil { + return jsonrpcCommands, errors.New("cannot read file " + jsonFilename) + } + if err := json.Unmarshal(data, &jsonrpcCommands); err != nil { + return jsonrpcCommands, errors.New("cannot parse JSON " + jsonFilename) + } + return jsonrpcCommands, nil +} - if ext == ".tar" || ext == ".zip" { - file, err := os.Open(jsonFilename) - if err != nil { - return false, errors.New("cannot open archive file " + jsonFilename) - } - defer func(file *os.File) { - err := file.Close() - if err != nil { - fmt.Printf("\nfailed to close archive file: %v\n", err) - } - }(file) - - tarReader := tar.NewReader(bzip2.NewReader(file)) - _, err = tarReader.Next() - if err != nil { - tarReader = tar.NewReader(file) - _, err = tarReader.Next() - if err != nil { - return false, errors.New("bad archive file " + jsonFilename) - } - } - - buff, err := io.ReadAll(tarReader) - if err != nil { - return false, errors.New("cannot read from archive " + jsonFilename) - } +func runTest(ctx context.Context, jsonFile string, testNumber int, transportType string, config *Config) (bool, error) { + jsonFilename := filepath.Join(config.JSONDir, jsonFile) - if err := json.Unmarshal(buff, &jsonrpcCommands); err != nil { - return false, errors.New("cannot parse JSON from archive " + jsonFilename) - } - } else if ext == ".gzip" { - file, err := os.Open(jsonFilename) + var jsonrpcCommands []JSONRPCCommand + var err error + if isArchive(jsonFilename) { + tempFilePaths, err := extractArchive(jsonFilename, config.SanitizeArchiveExt) if err != nil { - return false, errors.New("cannot open gzip file " + jsonFilename) + return false, errors.New("cannot extract archive file " + jsonFilename) } - defer func(file *os.File) { - err := file.Close() - if err != nil { - fmt.Printf("\nfailed to close gzip file: %v\n", err) + removeTempFiles := func() { + for _, path := range tempFilePaths { + err := os.Remove(path) + if err != nil { + fmt.Printf("failed to remove temp file 
%s: %v\n", path, err) + } } - }(file) - - gzReader, err := gzip.NewReader(file) - if err != nil { - return false, errors.New("cannot create gzip reader " + jsonFilename) } - defer func(gzReader *gzip.Reader) { - err := gzReader.Close() + for _, tempFilePath := range tempFilePaths { + jsonrpcCommands, err = extractJsonCommands(tempFilePath) if err != nil { - fmt.Printf("\nfailed to close gzip reader: %v\n", err) + removeTempFiles() + return false, errors.New("cannot extract JSONRPC commands from " + tempFilePath) } - }(gzReader) - - buff, err := io.ReadAll(gzReader) - if err != nil { - return false, errors.New("cannot read from gzip " + jsonFilename) - } - - if err := json.Unmarshal(buff, &jsonrpcCommands); err != nil { - return false, errors.New("cannot parse JSON from gzip " + jsonFilename) } + removeTempFiles() } else { - data, err := os.ReadFile(jsonFilename) + jsonrpcCommands, err = extractJsonCommands(jsonFilename) if err != nil { - return false, errors.New("cannot read file " + jsonFilename) - } - - if err := json.Unmarshal(data, &jsonrpcCommands); err != nil { - return false, errors.New("cannot parse JSON " + jsonFilename) + return false, errors.New("cannot extract JSONRPC commands from " + jsonFilename) } } @@ -1495,8 +1756,6 @@ func runTest(ctx context.Context, jsonFile string, testNumber int, transportType return true, nil } -// Part 5: Command-line Parsing and Main Function - func mustAtoi(s string) int { if s == "" { return 0 diff --git a/go.mod b/go.mod index 9919840d..83827fd4 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/erigontech/rpc-tests go 1.24 require ( + github.com/dsnet/compress v0.0.1 github.com/golang-jwt/jwt/v5 v5.3.0 github.com/gorilla/websocket v1.5.3 github.com/josephburnett/jd/v2 v2.3.0 diff --git a/go.sum b/go.sum index fae29cda..1f8e465f 100644 --- a/go.sum +++ b/go.sum @@ -6,6 +6,9 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654 h1:XOPLOMn/zT4jIgxfxSsoXPxkrzz0FaCHwp33x5POJ+Q= github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654/go.mod h1:qm+vckxRlDt0aOla0RYJJVeqHZlWfOm2UIxHaqPB46E= +github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q= +github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= @@ -23,6 +26,8 @@ github.com/josephburnett/jd/v2 v2.3.0 h1:AyNT0zSStJ2j28zutWDO4fkc95JoICryWQRmDTR github.com/josephburnett/jd/v2 v2.3.0/go.mod h1:0I5+gbo7y8diuajJjm79AF44eqTheSJy1K7DSbIUFAQ= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -43,6 +48,7 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tsenart/vegeta/v12 v12.13.0 h1:J/UiNS3f69MkL0tsRLVUUV8uXXQZxdRUchtS+GYiSFc= github.com/tsenart/vegeta/v12 v12.13.0/go.mod h1:gpdfR++WHV9/RZh4oux0f6lNPhsOH8pCjIGUlcPQe1M= 
+github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU= github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= From 84af64a62cdb704795f401da17971bb96c4a3542 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Mon, 8 Dec 2025 17:21:46 +0100 Subject: [PATCH 05/87] integration: v2 migration to go --- cmd/perf/main.go | 1947 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1947 insertions(+) create mode 100644 cmd/perf/main.go diff --git a/cmd/perf/main.go b/cmd/perf/main.go new file mode 100644 index 00000000..1bbd0e2b --- /dev/null +++ b/cmd/perf/main.go @@ -0,0 +1,1947 @@ +package main + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "context" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net/http" + "os" + "os/exec" + "os/user" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + vegeta "github.com/tsenart/vegeta/v12/lib" + "github.com/urfave/cli/v2" +) + +const ( + DefaultTestSequence = "50:30,1000:30,2500:20,10000:20" + DefaultRepetitions = 10 + DefaultVegetaPatternTarFile = "" + DefaultDaemonVegetaOnCore = "-:-" + DefaultErigonBuildDir = "" + DefaultSilkwormBuildDir = "" + DefaultErigonAddress = "localhost" + DefaultTestMode = "3" + DefaultWaitingTime = 5 + DefaultMaxConn = "9000" + DefaultTestType = "eth_getLogs" + DefaultVegetaResponseTimeout = "300s" + DefaultMaxBodyRsp = "1500" + + Silkworm = "silkworm" + Erigon = "rpcdaemon" + BinaryDir = "bin" + SilkwormServerName = "rpcdaemon" + ErigonServerName = "rpcdaemon" +) + +var ( + RunTestDirname string + VegetaPatternDirname string + VegetaReport string + VegetaTarFileName string + VegetaPatternSilkwormBase string + VegetaPatternErigonBase string +) + 
+func init() { + // Generate a random directory name + timestamp := time.Now().UnixNano() + RunTestDirname = fmt.Sprintf("/tmp/run_tests_%d", timestamp) + VegetaPatternDirname = RunTestDirname + "/erigon_stress_test" + VegetaReport = RunTestDirname + "/vegeta_report.hrd" + VegetaTarFileName = RunTestDirname + "/vegeta_TAR_File" + VegetaPatternSilkwormBase = VegetaPatternDirname + "/vegeta_geth_" + VegetaPatternErigonBase = VegetaPatternDirname + "/vegeta_erigon_" +} + +// Config holds all configuration for the performance test +type Config struct { + VegetaPatternTarFile string + DaemonVegetaOnCore string + ErigonDir string + SilkwormDir string + Repetitions int + TestSequence string + RPCDaemonAddress string + TestMode string + TestType string + TestingDaemon string + WaitingTime int + VersionedTestReport bool + Verbose bool + MacConnection bool + CheckServerAlive bool + Tracing bool + EmptyCache bool + CreateTestReport bool + MaxConnection string + VegetaResponseTimeout string + MaxBodyRsp string + JSONReportFile string + BinaryFileFullPathname string + BinaryFile string + ChainName string + MorePercentiles bool + InstantReport bool + HaltOnVegetaError bool +} + +// NewConfig creates a new Config with default values +func NewConfig() *Config { + return &Config{ + VegetaPatternTarFile: DefaultVegetaPatternTarFile, + DaemonVegetaOnCore: DefaultDaemonVegetaOnCore, + ErigonDir: DefaultErigonBuildDir, + SilkwormDir: DefaultSilkwormBuildDir, + Repetitions: DefaultRepetitions, + TestSequence: DefaultTestSequence, + RPCDaemonAddress: DefaultErigonAddress, + TestMode: DefaultTestMode, + TestType: DefaultTestType, + TestingDaemon: "", + WaitingTime: DefaultWaitingTime, + VersionedTestReport: false, + Verbose: false, + MacConnection: false, + CheckServerAlive: true, + Tracing: false, + EmptyCache: false, + CreateTestReport: false, + MaxConnection: DefaultMaxConn, + VegetaResponseTimeout: DefaultVegetaResponseTimeout, + MaxBodyRsp: DefaultMaxBodyRsp, + JSONReportFile: "", + 
BinaryFileFullPathname: "", + BinaryFile: "", + ChainName: "mainnet", + MorePercentiles: false, + InstantReport: false, + HaltOnVegetaError: false, + } +} + +// Validate checks the configuration for conflicts and invalid values +func (c *Config) Validate() error { + if c.JSONReportFile != "" && c.TestMode == "3" { + return fmt.Errorf("incompatible option json-report with test-mode=3") + } + + if c.TestMode == "3" && c.TestingDaemon != "" { + return fmt.Errorf("incompatible option test-mode=3 and testing-daemon") + } + + if c.JSONReportFile != "" && c.TestingDaemon == "" { + return fmt.Errorf("with json-report must also set testing-daemon") + } + + if (c.ErigonDir != DefaultErigonBuildDir || c.SilkwormDir != DefaultSilkwormBuildDir) && + c.RPCDaemonAddress != DefaultErigonAddress { + return fmt.Errorf("incompatible option rpc-daemon-address with erigon-dir/silk-dir") + } + + if c.EmptyCache { + currentUser, err := user.Current() + if err != nil { + return fmt.Errorf("failed to get current user: %w", err) + } + if currentUser.Username != "root" { + return fmt.Errorf("empty-cache option can only be used by root") + } + } + + if c.CreateTestReport { + if _, err := os.Stat(c.ErigonDir); c.ErigonDir != "" && os.IsNotExist(err) { + return fmt.Errorf("erigon build dir not specified correctly: %s", c.ErigonDir) + } + + if _, err := os.Stat(c.SilkwormDir); c.SilkwormDir != "" && os.IsNotExist(err) { + return fmt.Errorf("silkworm build dir not specified correctly: %s", c.SilkwormDir) + } + } + + return nil +} + +// TestSequenceItem represents a single test in the sequence +type TestSequenceItem struct { + QPS int + Duration int +} + +type TestSequence []TestSequenceItem + +// ParseTestSequence parses the test sequence string into structured items +func ParseTestSequence(sequence string) ([]TestSequenceItem, error) { + var items []TestSequenceItem + + parts := strings.Split(sequence, ",") + for _, part := range parts { + qpsDur := strings.Split(part, ":") + if len(qpsDur) != 2 
{ + return nil, fmt.Errorf("invalid test sequence format: %s", part) + } + + qps, err := strconv.Atoi(qpsDur[0]) + if err != nil { + return nil, fmt.Errorf("invalid QPS value: %s", qpsDur[0]) + } + + duration, err := strconv.Atoi(qpsDur[1]) + if err != nil { + return nil, fmt.Errorf("invalid duration value: %s", qpsDur[1]) + } + + items = append(items, TestSequenceItem{ + QPS: qps, + Duration: duration, + }) + } + + return items, nil +} + +// VegetaTarget represents a single HTTP request target for Vegeta +type VegetaTarget struct { + Method string `json:"method"` + URL string `json:"url"` + Body []byte `json:"body,omitempty"` + Header map[string][]string `json:"header,omitempty"` +} + +// TestMetrics holds the results of a performance test +type TestMetrics struct { + DaemonName string + TestNumber int + Repetition int + QPS string + Duration string + MinLatency string + Mean string + P50 string + P90 string + P95 string + P99 string + MaxLatency string + SuccessRatio string + Error string + VegetaMetrics *vegeta.Metrics +} + +// JSONReport represents the structure of the JSON performance report +type JSONReport struct { + Platform PlatformInfo `json:"platform"` + Configuration ConfigurationInfo `json:"configuration"` + Results []TestResult `json:"results"` +} + +// PlatformInfo holds platform hardware and software information +type PlatformInfo struct { + Vendor string `json:"vendor"` + Product string `json:"product"` + Board string `json:"board"` + CPU string `json:"cpu"` + Bogomips string `json:"bogomips"` + Kernel string `json:"kernel"` + GCCVersion string `json:"gccVersion"` + GoVersion string `json:"goVersion"` + SilkrpcCommit string `json:"silkrpcCommit"` + ErigonCommit string `json:"erigonCommit"` +} + +// ConfigurationInfo holds test configuration information +type ConfigurationInfo struct { + TestingDaemon string `json:"testingDaemon"` + TestingAPI string `json:"testingApi"` + TestSequence string `json:"testSequence"` + TestRepetitions int 
`json:"testRepetitions"` + VegetaFile string `json:"vegetaFile"` + VegetaChecksum string `json:"vegetaChecksum"` + Taskset string `json:"taskset"` +} + +// TestResult holds results for a single QPS/duration test +type TestResult struct { + QPS string `json:"qps"` + Duration string `json:"duration"` + TestRepetitions []RepetitionInfo `json:"testRepetitions"` +} + +// RepetitionInfo holds information for a single test repetition +type RepetitionInfo struct { + VegetaBinary string `json:"vegetaBinary"` + VegetaReport map[string]interface{} `json:"vegetaReport"` + VegetaReportHdrPlot string `json:"vegetaReportHdrPlot"` +} + +// Part 2: Hardware utilities and helper functions + +// Hardware provides methods to extract hardware information +type Hardware struct{} + +// Vendor returns the system vendor +func (h *Hardware) Vendor() string { + if runtime.GOOS != "linux" { + return "unknown" + } + data, err := os.ReadFile("/sys/devices/virtual/dmi/id/sys_vendor") + if err != nil { + return "unknown" + } + return strings.TrimSpace(string(data)) +} + +// NormalizedVendor returns the system vendor as a lowercase first token +func (h *Hardware) NormalizedVendor() string { + vendor := h.Vendor() + parts := strings.Split(vendor, " ") + if len(parts) > 0 { + return strings.ToLower(parts[0]) + } + return "unknown" +} + +// Product returns the system product name +func (h *Hardware) Product() string { + if runtime.GOOS != "linux" { + return "unknown" + } + data, err := os.ReadFile("/sys/devices/virtual/dmi/id/product_name") + if err != nil { + return "unknown" + } + return strings.TrimSpace(string(data)) +} + +// Board returns the system board name +func (h *Hardware) Board() string { + if runtime.GOOS != "linux" { + return "unknown" + } + data, err := os.ReadFile("/sys/devices/virtual/dmi/id/board_name") + if err != nil { + return "unknown" + } + return strings.TrimSpace(string(data)) +} + +// NormalizedProduct returns the system product name as lowercase without whitespaces +func 
(h *Hardware) NormalizedProduct() string {
	product := h.Product()
	return strings.ToLower(strings.ReplaceAll(product, " ", ""))
}

// NormalizedBoard returns the board name as a lowercase name without
// whitespaces; only the part before the first '/' is used.
func (h *Hardware) NormalizedBoard() string {
	board := h.Board()
	parts := strings.Split(board, "/")
	if len(parts) > 0 {
		return strings.ToLower(strings.ReplaceAll(parts[0], " ", ""))
	}
	return "unknown"
}

// cpuInfoField returns the trimmed value of the first /proc/cpuinfo line
// whose key matches the given name, or "unknown" when unavailable.
// Reading the file directly replaces the previous `sh -c "cat | grep | uniq"`
// pipeline: no shell dependency, and values containing ':' are not truncated.
func cpuInfoField(key string) string {
	data, err := os.ReadFile("/proc/cpuinfo")
	if err != nil {
		return "unknown"
	}
	for _, line := range strings.Split(string(data), "\n") {
		name, value, ok := strings.Cut(line, ":")
		if ok && strings.TrimSpace(name) == key {
			return strings.TrimSpace(value)
		}
	}
	return "unknown"
}

// GetCPUModel returns the CPU model information (Linux only).
func (h *Hardware) GetCPUModel() string {
	if runtime.GOOS != "linux" {
		return "unknown"
	}
	return cpuInfoField("model name")
}

// GetBogomips returns the bogomips value (Linux only).
func (h *Hardware) GetBogomips() string {
	if runtime.GOOS != "linux" {
		return "unknown"
	}
	return cpuInfoField("bogomips")
}

// GetKernelVersion returns the kernel version as reported by `uname -r`.
func GetKernelVersion() string {
	output, err := exec.Command("uname", "-r").Output()
	if err != nil {
		return "unknown"
	}
	return strings.TrimSpace(string(output))
}

// GetGCCVersion returns the first line of `gcc --version`.
func GetGCCVersion() string {
	output, err := exec.Command("gcc", "--version").Output()
	if err != nil {
		return "unknown"
	}
	lines := strings.Split(string(output), "\n")
	if len(lines) > 0 {
		return strings.TrimSpace(lines[0])
	}
	return "unknown"
}

// GetGoVersion returns the Go version
func GetGoVersion() string {
	cmd := exec.Command("go", "version")
	output, err := cmd.Output()
+ if err != nil { + return "unknown" + } + return strings.TrimSpace(string(output)) +} + +// GetGitCommit returns the git commit hash for a directory +func GetGitCommit(dir string) string { + if dir == "" { + return "" + } + + cmd := exec.Command("git", "rev-parse", "HEAD") + cmd.Dir = dir + output, err := cmd.Output() + if err != nil { + return "" + } + return strings.TrimSpace(string(output)) +} + +// GetFileChecksum returns the checksum of a file +func GetFileChecksum(filepath string) string { + cmd := exec.Command("sum", filepath) + output, err := cmd.Output() + if err != nil { + return "" + } + parts := strings.Split(string(output), " ") + if len(parts) > 0 { + return parts[0] + } + return "" +} + +// IsProcessRunning checks if a process with the given name is running +func IsProcessRunning(processName string) bool { + cmd := exec.Command("pgrep", "-f", processName) + err := cmd.Run() + return err == nil +} + +// EmptyCache drops OS caches +func EmptyCache() error { + var cmd *exec.Cmd + + switch runtime.GOOS { + case "linux": + // Sync and drop caches + if err := exec.Command("sync").Run(); err != nil { + return fmt.Errorf("sync failed: %w", err) + } + cmd = exec.Command("sh", "-c", "echo 3 > /proc/sys/vm/drop_caches") + case "darwin": + // macOS purge + if err := exec.Command("sync").Run(); err != nil { + return fmt.Errorf("sync failed: %w", err) + } + cmd = exec.Command("purge") + default: + return fmt.Errorf("unsupported OS: %s", runtime.GOOS) + } + + if err := cmd.Run(); err != nil { + return fmt.Errorf("cache purge failed: %w", err) + } + + return nil +} + +// FormatDuration formats a duration string with units +func FormatDuration(d time.Duration) string { + if d < time.Millisecond { + return fmt.Sprintf("%.0fµs", float64(d.Microseconds())) + } + if d < time.Second { + return fmt.Sprintf("%.2fms", float64(d.Microseconds())/1000.0) + } + return fmt.Sprintf("%.2fs", d.Seconds()) +} + +// ParseLatency parses a latency string and returns it in a consistent 
format +func ParseLatency(latency string) string { + // Replace microsecond symbol and normalise + latency = strings.ReplaceAll(latency, "µs", "us") + return strings.TrimSpace(latency) +} + +// Part 3: PerfTest implementation + +// PerfTest manages performance test execution +type PerfTest struct { + config *Config + testReport *TestReport +} + +// NewPerfTest creates a new performance test instance +func NewPerfTest(config *Config, testReport *TestReport) (*PerfTest, error) { + pt := &PerfTest{ + config: config, + testReport: testReport, + } + + // Initial cleanup + if err := pt.Cleanup(true); err != nil { + return nil, fmt.Errorf("initial cleanup failed: %w", err) + } + + // Copy and extract the pattern file + if err := pt.CopyAndExtractPatternFile(); err != nil { + return nil, fmt.Errorf("failed to setup pattern file: %w", err) + } + + return pt, nil +} + +// Cleanup removes temporary files +func (pt *PerfTest) Cleanup(initial bool) error { + filesToRemove := []string{ + VegetaTarFileName, + "perf.data.old", + "perf.data", + } + + for _, fileName := range filesToRemove { + _, err := os.Stat(fileName) + if errors.Is(err, os.ErrNotExist) { + continue + } + err = os.Remove(fileName) + if err != nil { + return err + } + } + + // Remove the pattern directory + err := os.RemoveAll(VegetaPatternDirname) + if err != nil { + return err + } + + // Remove the run test directory + if initial { + err := os.RemoveAll(RunTestDirname) + if err != nil { + return err + } + } else { + // Try to remove, ignore if not empty + _ = os.Remove(RunTestDirname) + } + + return nil +} + +// CopyAndExtractPatternFile copies and extracts the vegeta pattern tar file +func (pt *PerfTest) CopyAndExtractPatternFile() error { + // Check if the pattern file exists + if _, err := os.Stat(pt.config.VegetaPatternTarFile); os.IsNotExist(err) { + return fmt.Errorf("invalid pattern file: %s", pt.config.VegetaPatternTarFile) + } + + // Create the run test directory + if err := os.MkdirAll(RunTestDirname, 
0755); err != nil { + return fmt.Errorf("failed to create temp directory: %w", err) + } + + // Copy tar file + if err := pt.copyFile(pt.config.VegetaPatternTarFile, VegetaTarFileName); err != nil { + return fmt.Errorf("failed to copy pattern file: %w", err) + } + + if pt.config.Tracing { + fmt.Printf("Copy Vegeta pattern: %s -> %s\n", pt.config.VegetaPatternTarFile, VegetaTarFileName) + } + + // Extract tar file + if err := pt.extractTarGz(VegetaTarFileName, RunTestDirname); err != nil { + return fmt.Errorf("failed to extract pattern file: %w", err) + } + + if pt.config.Tracing { + fmt.Printf("Extracting Vegeta pattern to: %s\n", RunTestDirname) + } + + // Substitute address if not localhost + if pt.config.RPCDaemonAddress != "localhost" { + silkwormPattern := VegetaPatternSilkwormBase + pt.config.TestType + ".txt" + erigonPattern := VegetaPatternErigonBase + pt.config.TestType + ".txt" + + if err := pt.replaceInFile(silkwormPattern, "localhost", pt.config.RPCDaemonAddress); err != nil { + log.Printf("Warning: failed to replace address in silkworm pattern: %v", err) + } + + if err := pt.replaceInFile(erigonPattern, "localhost", pt.config.RPCDaemonAddress); err != nil { + log.Printf("Warning: failed to replace address in erigon pattern: %v", err) + } + } + + return nil +} + +// copyFile copies a file from src to dst +func (pt *PerfTest) copyFile(src, dst string) error { + sourceFile, err := os.Open(src) + if err != nil { + return err + } + defer func(sourceFile *os.File) { + err := sourceFile.Close() + if err != nil { + log.Printf("Warning: failed to close source file: %v", err) + } + }(sourceFile) + + destFile, err := os.Create(dst) + if err != nil { + return err + } + defer func(destFile *os.File) { + err := destFile.Close() + if err != nil { + log.Printf("Warning: failed to close destination file: %v", err) + } + }(destFile) + + _, err = io.Copy(destFile, sourceFile) + return err +} + +// extractTarGz extracts a tar.gz file to a destination directory +func (pt 
*PerfTest) extractTarGz(tarFile, destDir string) error { + file, err := os.Open(tarFile) + if err != nil { + return err + } + defer func(file *os.File) { + err := file.Close() + if err != nil { + log.Printf("Warning: failed to close tar file: %v", err) + } + }(file) + + /*gzr, err := gzip.NewReader(file) + if err != nil { + return err + } + defer func(gzr *gzip.Reader) { + err := gzr.Close() + if err != nil { + log.Printf("Warning: failed to close gzip reader: %v", err) + } + }(gzr)*/ + + tr := tar.NewReader(bzip2.NewReader(file)) + + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + target := filepath.Join(destDir, header.Name) + + switch header.Typeflag { + case tar.TypeDir: + if err := os.MkdirAll(target, 0755); err != nil { + return err + } + case tar.TypeReg: + outFile, err := os.Create(target) + if err != nil { + return err + } + if _, err := io.Copy(outFile, tr); err != nil { + err := outFile.Close() + if err != nil { + return err + } + return err + } + err = outFile.Close() + if err != nil { + return err + } + } + } + + return nil +} + +// replaceInFile replaces old string with new string in a file +func (pt *PerfTest) replaceInFile(filepath, old, new string) error { + input, err := os.ReadFile(filepath) + if err != nil { + return err + } + + output := strings.ReplaceAll(string(input), old, new) + + return os.WriteFile(filepath, []byte(output), 0644) +} + +// Execute runs a single performance test +func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, name, qpsValue, duration string, format ResultFormat) error { + // Empty cache if configured + if pt.config.EmptyCache { + if err := EmptyCache(); err != nil { + log.Printf("Warning: failed to empty cache: %v", err) + } + } + + // Determine pattern file + var pattern string + if name == Silkworm { + pattern = VegetaPatternSilkwormBase + pt.config.TestType + ".txt" + } else { + pattern = VegetaPatternErigonBase + pt.config.TestType + 
".txt" + } + + // Create the binary file name + timestamp := time.Now().Format("20060102150405") + pt.config.BinaryFile = fmt.Sprintf("%s_%s_%s_%s_%s_%s_%d.bin", + timestamp, + pt.config.ChainName, + pt.config.TestingDaemon, + pt.config.TestType, + qpsValue, + duration, + repetition+1) + + // Create the binary directory + var dirname string + if pt.config.VersionedTestReport { + dirname = "./reports/" + BinaryDir + "/" + } else { + dirname = RunTestDirname + "/" + BinaryDir + "/" + } + + if err := os.MkdirAll(dirname, 0755); err != nil { + return fmt.Errorf("failed to create binary directory: %w", err) + } + + pt.config.BinaryFileFullPathname = dirname + pt.config.BinaryFile + + // Print test result information + maxRepetitionDigits := strconv.Itoa(format.maxRepetitionDigits) + maxQpsDigits := strconv.Itoa(format.maxQpsDigits) + maxDurationDigits := strconv.Itoa(format.maxDurationDigits) + if pt.config.TestingDaemon != "" { + fmt.Printf("[%d.%"+maxRepetitionDigits+"d] %s: executes test qps: %"+maxQpsDigits+"s time: %"+maxDurationDigits+"s -> ", + testNumber, repetition+1, pt.config.TestingDaemon, qpsValue, duration) + } else { + fmt.Printf("[%d.%"+maxRepetitionDigits+"d] daemon: executes test qps: %"+maxQpsDigits+"s time: %"+maxDurationDigits+"s -> ", + testNumber, repetition+1, qpsValue, duration) + } + + // Load targets from pattern file + targets, err := pt.loadTargets(pattern) + if err != nil { + return fmt.Errorf("failed to load targets: %w", err) + } + + // Parse QPS and duration + qps, err := strconv.Atoi(qpsValue) + if err != nil { + return fmt.Errorf("invalid QPS value: %w", err) + } + + dur, err := strconv.Atoi(duration) + if err != nil { + return fmt.Errorf("invalid duration value: %w", err) + } + + // Run vegeta attack + metrics, err := pt.runVegetaAttack(ctx, targets, qps, time.Duration(dur)*time.Second, pt.config.BinaryFileFullPathname) + if err != nil { + return fmt.Errorf("vegeta attack failed: %w", err) + } + + // Check if the server is still alive 
during the test + if pt.config.CheckServerAlive { + var serverName string + if name == Silkworm { + serverName = SilkwormServerName + } else { + serverName = ErigonServerName + } + + if !IsProcessRunning(serverName) { + fmt.Println("test failed: server is Dead") + return fmt.Errorf("server died during test") + } + } + + // Process results + return pt.processResults(testNumber, repetition, name, qpsValue, duration, metrics) +} + +// loadTargets loads Vegeta targets from a pattern file +func (pt *PerfTest) loadTargets(filepath string) ([]vegeta.Target, error) { + file, err := os.Open(filepath) + if err != nil { + return nil, err + } + defer func(file *os.File) { + err := file.Close() + if err != nil { + log.Printf("Warning: failed to close pattern file: %v", err) + } + }(file) + + var targets []vegeta.Target + scanner := bufio.NewScanner(file) + buffer := make([]byte, 0, 256*1024) + scanner.Buffer(buffer, cap(buffer)) + + for scanner.Scan() { + line := scanner.Text() + if line == "" { + continue + } + + var vt VegetaTarget + if err := json.Unmarshal([]byte(line), &vt); err != nil { + return nil, fmt.Errorf("failed to parse target: %w", err) + } + + target := vegeta.Target{ + Method: vt.Method, + URL: vt.URL, + Body: vt.Body, + Header: make(http.Header), + } + + for k, v := range vt.Header { + for _, vv := range v { + target.Header.Set(k, vv) + } + } + + targets = append(targets, target) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + if len(targets) == 0 { + return nil, fmt.Errorf("no targets found in pattern file") + } + + // Print test port information + /*if pt.config.Verbose { + fmt.Printf("Test on port: %s\n", targets[0].URL) + }*/ + + return targets, nil +} + +// runVegetaAttack executes a Vegeta attack using the library +func (pt *PerfTest) runVegetaAttack(ctx context.Context, targets []vegeta.Target, qps int, duration time.Duration, outputFile string) (*vegeta.Metrics, error) { + // Create rate + rate := vegeta.Rate{Freq: qps, Per: 
time.Second} + + // Create targeter + targeter := vegeta.NewStaticTargeter(targets...) + + // Create attacker + timeout, _ := time.ParseDuration(pt.config.VegetaResponseTimeout) + maxConnInt, _ := strconv.Atoi(pt.config.MaxConnection) + maxBodyInt, _ := strconv.Atoi(pt.config.MaxBodyRsp) + + attacker := vegeta.NewAttacker( + vegeta.Timeout(timeout), + vegeta.Workers(uint64(maxConnInt)), + vegeta.MaxBody(int64(maxBodyInt)), + vegeta.KeepAlive(true), + ) + + // Create the output file for results + out, err := os.Create(outputFile) + if err != nil { + return nil, fmt.Errorf("failed to create output file: %w", err) + } + defer func(out *os.File) { + err := out.Close() + if err != nil { + log.Printf("Warning: failed to close output file: %v", err) + } + }(out) + + encoder := vegeta.NewEncoder(out) + + // Execute the attack i.e. the test workload + var metrics vegeta.Metrics + resultCh := attacker.Attack(targeter, rate, duration, "vegeta-attack") + for { + select { + case result := <-resultCh: + if result == nil { + metrics.Close() + return &metrics, nil + } + metrics.Add(result) + if err := encoder.Encode(result); err != nil { + log.Printf("Warning: failed to encode result: %v", err) + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +// ExecuteSequence executes a sequence of performance tests +func (pt *PerfTest) ExecuteSequence(ctx context.Context, sequence []TestSequenceItem, tag string) error { + testNumber := 1 + + // Get pattern to extract port information + var pattern string + if tag == Silkworm { + pattern = VegetaPatternSilkwormBase + pt.config.TestType + ".txt" + } else { + pattern = VegetaPatternErigonBase + pt.config.TestType + ".txt" + } + + // Print port information + if file, err := os.Open(pattern); err == nil { + scanner := bufio.NewScanner(file) + if scanner.Scan() { + var vt VegetaTarget + if json.Unmarshal([]byte(scanner.Text()), &vt) == nil { + fmt.Printf("Test on port: %s\n", vt.URL) + } + } + err = file.Close() + if err != nil { + 
return err + } + } + + maxQpsDigits, maxDurationDigits := maxQpsAndDurationDigits(sequence) + resultFormat := ResultFormat{ + maxRepetitionDigits: countDigits(pt.config.Repetitions), + maxQpsDigits: maxQpsDigits, + maxDurationDigits: maxDurationDigits, + } + + // Execute each test in sequence + for _, test := range sequence { + for rep := 0; rep < pt.config.Repetitions; rep++ { + if test.QPS > 0 { + err := pt.Execute(ctx, testNumber, rep, tag, + strconv.Itoa(test.QPS), + strconv.Itoa(test.Duration), + resultFormat) + + if err != nil { + fmt.Printf("Server dead test Aborted! Error: %v\n", err) + return err + } + } else { + // qps = 0 means we've been asked for a silence period + time.Sleep(time.Duration(test.Duration) * time.Second) + } + + time.Sleep(time.Duration(pt.config.WaitingTime) * time.Second) + } + testNumber++ + fmt.Println() + } + + return nil +} + +func countDigits(n int) int { + if n == 0 { + return 1 + } + digits := 0 + for n != 0 { + n /= 10 + digits++ + } + return digits +} + +func maxQpsAndDurationDigits(sequence TestSequence) (maxQpsDigits, maxDurationDigits int) { + for _, item := range sequence { + qpsDigits := countDigits(item.QPS) + if qpsDigits > maxQpsDigits { + maxQpsDigits = qpsDigits + } + durationDigits := countDigits(item.Duration) + if durationDigits > maxDurationDigits { + maxDurationDigits = durationDigits + } + } + return +} + +type ResultFormat struct { + maxRepetitionDigits, maxQpsDigits, maxDurationDigits int +} + +// Part 4: Results processing + +// processResults processes the vegeta metrics and generates reports +func (pt *PerfTest) processResults(testNumber, repetition int, daemonName, qpsValue, duration string, metrics *vegeta.Metrics) error { + // Extract latency values + minLatency := FormatDuration(metrics.Latencies.Min) + mean := FormatDuration(metrics.Latencies.Mean) + p50 := FormatDuration(metrics.Latencies.P50) + p90 := FormatDuration(metrics.Latencies.P90) + p95 := FormatDuration(metrics.Latencies.P95) + p99 := 
FormatDuration(metrics.Latencies.P99) + maxLatency := FormatDuration(metrics.Latencies.Max) + + // Calculate success ratio + successRatio := fmt.Sprintf("%.2f%%", metrics.Success*100) + + // Check for errors + errorMsg := "" + if len(metrics.Errors) > 0 { + // Collect unique error messages + errorMap := make(map[string]int) + for _, err := range metrics.Errors { + errorMap[err]++ + } + + for errStr, count := range errorMap { + if errorMsg != "" { + errorMsg += "; " + } + errorMsg += fmt.Sprintf("%s (x%d)", errStr, count) + } + } + + // Print results + if errorMsg != "" { + fmt.Printf("=%7s lat=[max=%8s] error=%s\n", successRatio, maxLatency, errorMsg) + } else { + if pt.config.MorePercentiles { + fmt.Printf("success=%7s lat=[p50=%8s p90=%8s p95=%8s p99=%8s max=%8s]\n", + successRatio, p50, p90, p95, p99, maxLatency) + } else { + fmt.Printf("success=%7s lat=[max=%8s]\n", successRatio, maxLatency) + } + } + + // Check for failures + if errorMsg != "" && pt.config.HaltOnVegetaError { + return fmt.Errorf("test failed: %s", errorMsg) + } + + if successRatio != "100.00%" { + return fmt.Errorf("test failed: ratio is not 100.00%%") + } + + // Write to the test report if enabled + if pt.config.CreateTestReport { + testMetrics := &TestMetrics{ + DaemonName: daemonName, + TestNumber: testNumber, + Repetition: repetition, + QPS: qpsValue, + Duration: duration, + MinLatency: minLatency, + Mean: mean, + P50: p50, + P90: p90, + P95: p95, + P99: p99, + MaxLatency: maxLatency, + SuccessRatio: successRatio, + Error: errorMsg, + VegetaMetrics: metrics, + } + + if err := pt.testReport.WriteTestReport(testMetrics); err != nil { + return fmt.Errorf("failed to write test report: %w", err) + } + } + + // Print instant report if enabled + if pt.config.InstantReport { + pt.printInstantReport(metrics) + } + + return nil +} + +// printInstantReport prints detailed metrics to the console +func (pt *PerfTest) printInstantReport(metrics *vegeta.Metrics) { + fmt.Println("\n=== Detailed Metrics 
===") + fmt.Printf("Requests: %d\n", metrics.Requests) + fmt.Printf("Duration: %v\n", metrics.Duration) + fmt.Printf("Rate: %.2f req/s\n", metrics.Rate) + fmt.Printf("Throughput: %.2f req/s\n", metrics.Throughput) + fmt.Printf("Success: %.2f%%\n", metrics.Success*100) + + fmt.Println("\nLatencies:") + fmt.Printf(" Min: %v\n", metrics.Latencies.Min) + fmt.Printf(" Mean: %v\n", metrics.Latencies.Mean) + fmt.Printf(" P50: %v\n", metrics.Latencies.P50) + fmt.Printf(" P90: %v\n", metrics.Latencies.P90) + fmt.Printf(" P95: %v\n", metrics.Latencies.P95) + fmt.Printf(" P99: %v\n", metrics.Latencies.P99) + fmt.Printf(" Max: %v\n", metrics.Latencies.Max) + + fmt.Println("\nStatus Codes:") + for code, count := range metrics.StatusCodes { + fmt.Printf(" %s: %d\n", code, count) + } + + if len(metrics.Errors) > 0 { + fmt.Println("\nErrors:") + errorMap := make(map[string]int) + for _, err := range metrics.Errors { + errorMap[err]++ + } + for errStr, count := range errorMap { + fmt.Printf(" %s: %d\n", errStr, count) + } + } + + fmt.Print("========================\n\n") +} + +// generateHdrPlot generates HDR histogram plot data +func (pt *PerfTest) generateHdrPlot(binaryFile string) (string, error) { + // Read the binary file + file, err := os.Open(binaryFile) + if err != nil { + return "", err + } + defer func(file *os.File) { + err := file.Close() + if err != nil { + log.Printf("Warning: failed to close file: %v", err) + } + }(file) + + // Decode results + dec := vegeta.NewDecoder(file) + + // Create metrics + var metrics vegeta.Metrics + for { + var result vegeta.Result + if err := dec.Decode(&result); err != nil { + if err == io.EOF { + break + } + return "", err + } + metrics.Add(&result) + } + metrics.Close() + + // Generate HDR histogram + var buf bytes.Buffer + histogram := metrics.Histogram + if histogram != nil { + // Print histogram data + for i, bucket := range histogram.Buckets { + _, err := fmt.Fprintf(&buf, "%.6f %d\n", float64(bucket), histogram.Counts[i]) + if err 
!= nil { + return "", err + } + } + } + + return buf.String(), nil +} + +// generateJSONReport generates a JSON report from the binary file +func (pt *PerfTest) generateJSONReport(binaryFile string) (map[string]interface{}, error) { + // Read the binary file + file, err := os.Open(binaryFile) + if err != nil { + return nil, err + } + defer func(file *os.File) { + err := file.Close() + if err != nil { + log.Printf("Warning: failed to close file: %v", err) + } + }(file) + + // Decode results + dec := vegeta.NewDecoder(file) + + // Create metrics + var metrics vegeta.Metrics + for { + var result vegeta.Result + if err := dec.Decode(&result); err != nil { + if err == io.EOF { + break + } + return nil, err + } + metrics.Add(&result) + } + metrics.Close() + + // Convert metrics to map + report := map[string]interface{}{ + "requests": metrics.Requests, + "duration": metrics.Duration.Seconds(), + "rate": metrics.Rate, + "throughput": metrics.Throughput, + "success": metrics.Success, + "latencies": map[string]interface{}{ + "min": metrics.Latencies.Min.Seconds(), + "mean": metrics.Latencies.Mean.Seconds(), + "p50": metrics.Latencies.P50.Seconds(), + "p90": metrics.Latencies.P90.Seconds(), + "p95": metrics.Latencies.P95.Seconds(), + "p99": metrics.Latencies.P99.Seconds(), + "max": metrics.Latencies.Max.Seconds(), + }, + "status_codes": metrics.StatusCodes, + "errors": metrics.Errors, + } + + return report, nil +} + +// Part 5: TestReport implementation + +// TestReport manages CSV and JSON report generation +type TestReport struct { + config *Config + csvFile *os.File + csvWriter *csv.Writer + jsonReport *JSONReport + hardware *Hardware + currentTestIdx int +} + +// NewTestReport creates a new test report instance +func NewTestReport(config *Config) *TestReport { + return &TestReport{ + config: config, + hardware: &Hardware{}, + currentTestIdx: -1, + } +} + +// Open initialises the test report and writes headers +func (tr *TestReport) Open() error { + if err := 
tr.createCSVFile(); err != nil { + return fmt.Errorf("failed to create CSV file: %w", err) + } + + // Collect system information + checksum := GetFileChecksum(tr.config.VegetaPatternTarFile) + gccVersion := GetGCCVersion() + goVersion := GetGoVersion() + kernelVersion := GetKernelVersion() + cpuModel := tr.hardware.GetCPUModel() + bogomips := tr.hardware.GetBogomips() + + var silkrpcCommit, erigonCommit string + if tr.config.TestMode == "1" || tr.config.TestMode == "3" { + silkrpcCommit = GetGitCommit(tr.config.SilkwormDir) + } + if tr.config.TestMode == "2" || tr.config.TestMode == "3" { + erigonCommit = GetGitCommit(tr.config.ErigonDir) + } + + // Write headers + if err := tr.writeTestHeader(cpuModel, bogomips, kernelVersion, checksum, + gccVersion, goVersion, silkrpcCommit, erigonCommit); err != nil { + return fmt.Errorf("failed to write test header: %w", err) + } + + // Initialise the JSON report if needed + if tr.config.JSONReportFile != "" { + tr.initializeJSONReport(cpuModel, bogomips, kernelVersion, checksum, + gccVersion, goVersion, silkrpcCommit, erigonCommit) + } + + return nil +} + +// createCSVFile creates the CSV report file with appropriate naming +func (tr *TestReport) createCSVFile() error { + // Determine folder extension + extension := tr.hardware.NormalizedProduct() + if extension == "systemproductname" { + extension = tr.hardware.NormalizedBoard() + } + + // Create the folder path + csvFolder := tr.hardware.NormalizedVendor() + "_" + extension + var csvFolderPath string + if tr.config.VersionedTestReport { + csvFolderPath = filepath.Join("./reports", tr.config.ChainName, csvFolder) + } else { + csvFolderPath = filepath.Join(RunTestDirname, tr.config.ChainName, csvFolder) + } + + if err := os.MkdirAll(csvFolderPath, 0755); err != nil { + return fmt.Errorf("failed to create CSV folder: %w", err) + } + + // Generate CSV filename + timestamp := time.Now().Format("20060102150405") + var csvFilename string + if tr.config.TestingDaemon != "" { + 
csvFilename = fmt.Sprintf("%s_%s_%s_perf.csv",
			tr.config.TestType, timestamp, tr.config.TestingDaemon)
	} else {
		csvFilename = fmt.Sprintf("%s_%s_perf.csv",
			tr.config.TestType, timestamp)
	}

	csvFilepath := filepath.Join(csvFolderPath, csvFilename)

	// Create and open the CSV report file
	file, err := os.Create(csvFilepath)
	if err != nil {
		return fmt.Errorf("failed to create CSV file: %w", err)
	}

	tr.csvFile = file
	tr.csvWriter = csv.NewWriter(file)

	fmt.Printf("Perf report file: %s\n\n", csvFilepath)

	return nil
}

// writeTestHeader writes the platform/configuration metadata and the column
// headers to the CSV report.
func (tr *TestReport) writeTestHeader(cpuModel, bogomips, kernelVersion, checksum,
	gccVersion, goVersion, silkrpcCommit, erigonCommit string) error {

	// Each metadata entry is emitted as a 14-column row: 12 empty cells
	// followed by key and value, so the metadata lands to the right of the
	// result columns. A fresh slice is built per row instead of appending to
	// one shared backing array, eliminating the slice-aliasing hazard the
	// repeated append(emptyRow[:12], ...) pattern carried.
	writeKV := func(key, value string) error {
		row := make([]string, 14)
		row[12] = key
		row[13] = value
		return tr.csvWriter.Write(row)
	}

	if err := writeKV("vendor", tr.hardware.Vendor()); err != nil {
		return err
	}

	// "System Product Name" is the placeholder some BIOSes report; fall back
	// to the board name in that case.
	product := tr.hardware.Product()
	if product != "System Product Name" {
		if err := writeKV("product", product); err != nil {
			return err
		}
	} else {
		if err := writeKV("board", tr.hardware.Board()); err != nil {
			return err
		}
	}

	metadata := [][2]string{
		{"cpu", cpuModel},
		{"bogomips", bogomips},
		{"kernel", kernelVersion},
		{"taskset", tr.config.DaemonVegetaOnCore},
		{"vegetaFile", tr.config.VegetaPatternTarFile},
		{"vegetaChecksum", checksum},
		{"gccVersion", gccVersion},
		{"goVersion", goVersion},
		{"silkrpcVersion", silkrpcCommit},
		{"erigonVersion", erigonCommit},
	}
	for _, kv := range metadata {
		if err := writeKV(kv[0], kv[1]); err != nil {
			return err
		}
	}

	// Two empty separator rows between metadata and results
	for range 2 {
		if err := tr.csvWriter.Write([]string{}); err != nil {
			return err
		}
	}

	// Write column headers
	headers := []string{
		"Daemon", "TestNo", "Repetition", "Qps", "Time(secs)",
		"Min", "Mean", "50", "90", "95", "99", "Max", "Ratio", "Error",
	}
	if err := tr.csvWriter.Write(headers); err != nil {
		return err
	}
	tr.csvWriter.Flush()

	return tr.csvWriter.Error()
}

// initializeJSONReport initializes the JSON report structure with platform
// and configuration metadata; results are appended as tests complete.
func (tr *TestReport) initializeJSONReport(cpuModel, bogomips, kernelVersion, checksum,
	gccVersion, goVersion, silkrpcCommit, erigonCommit string) {

	tr.jsonReport = &JSONReport{
		Platform: PlatformInfo{
			Vendor:        strings.TrimSpace(tr.hardware.Vendor()),
			Product:       strings.TrimSpace(tr.hardware.Product()),
			Board:         strings.TrimSpace(tr.hardware.Board()),
			CPU:           strings.TrimSpace(cpuModel),
			Bogomips:      strings.TrimSpace(bogomips),
			Kernel:        strings.TrimSpace(kernelVersion),
			GCCVersion:    strings.TrimSpace(gccVersion),
			GoVersion:     strings.TrimSpace(goVersion),
			SilkrpcCommit: strings.TrimSpace(silkrpcCommit),
			ErigonCommit:  strings.TrimSpace(erigonCommit),
		},
		Configuration: ConfigurationInfo{
			TestingDaemon:   tr.config.TestingDaemon,
			TestingAPI:      tr.config.TestType,
			TestSequence:    tr.config.TestSequence,
			TestRepetitions: tr.config.Repetitions,
			VegetaFile:      tr.config.VegetaPatternTarFile,
			VegetaChecksum:  checksum,
			Taskset:         tr.config.DaemonVegetaOnCore,
		},
		Results: []TestResult{},
	}
}

// WriteTestReport writes a test result
to the report +func (tr *TestReport) WriteTestReport(metrics *TestMetrics) error { + // Write to CSV + row := []string{ + metrics.DaemonName, + strconv.Itoa(metrics.TestNumber), + strconv.Itoa(metrics.Repetition), + metrics.QPS, + metrics.Duration, + metrics.MinLatency, + metrics.Mean, + metrics.P50, + metrics.P90, + metrics.P95, + metrics.P99, + metrics.MaxLatency, + metrics.SuccessRatio, + metrics.Error, + } + + if err := tr.csvWriter.Write(row); err != nil { + return fmt.Errorf("failed to write CSV row: %w", err) + } + tr.csvWriter.Flush() + + // Write to JSON if enabled + if tr.config.JSONReportFile != "" { + if err := tr.writeTestReportToJSON(metrics); err != nil { + return fmt.Errorf("failed to write JSON report: %w", err) + } + } + + return nil +} + +// writeTestReportToJSON writes a test result to the JSON report +func (tr *TestReport) writeTestReportToJSON(metrics *TestMetrics) error { + // Check if we need to create a new test result entry + if metrics.Repetition == 0 { + tr.currentTestIdx++ + tr.jsonReport.Results = append(tr.jsonReport.Results, TestResult{ + QPS: strings.TrimSpace(metrics.QPS), + Duration: strings.TrimSpace(metrics.Duration), + TestRepetitions: []RepetitionInfo{}, + }) + } + + // Generate JSON report from the binary file + jsonReportData, err := tr.generateJSONReport(tr.config.BinaryFileFullPathname) + if err != nil { + return fmt.Errorf("failed to generate JSON report: %w", err) + } + + // Generate HDR plot + hdrPlot, err := tr.generateHdrPlot(tr.config.BinaryFileFullPathname) + if err != nil { + return fmt.Errorf("failed to generate HDR plot: %w", err) + } + + // Add repetition info + repetitionInfo := RepetitionInfo{ + VegetaBinary: tr.config.BinaryFile, + VegetaReport: jsonReportData, + VegetaReportHdrPlot: hdrPlot, + } + + if tr.currentTestIdx >= 0 && tr.currentTestIdx < len(tr.jsonReport.Results) { + tr.jsonReport.Results[tr.currentTestIdx].TestRepetitions = append( + tr.jsonReport.Results[tr.currentTestIdx].TestRepetitions, + 
repetitionInfo, + ) + } + + return nil +} + +// generateJSONReport generates a JSON report from the binary file +func (tr *TestReport) generateJSONReport(binaryFile string) (map[string]interface{}, error) { + // Read the binary file + file, err := os.Open(binaryFile) + if err != nil { + return nil, err + } + defer func(file *os.File) { + err := file.Close() + if err != nil { + log.Printf("Warning: failed to close file: %v", err) + } + }(file) + + // Decode results + dec := vegeta.NewDecoder(file) + + // Create metrics + var metrics vegeta.Metrics + for { + var result vegeta.Result + if err := dec.Decode(&result); err != nil { + if err == io.EOF { + break + } + return nil, err + } + metrics.Add(&result) + } + metrics.Close() + + // Convert metrics to map + report := map[string]interface{}{ + "requests": metrics.Requests, + "duration": metrics.Duration.Seconds(), + "rate": metrics.Rate, + "throughput": metrics.Throughput, + "success": metrics.Success, + "latencies": map[string]interface{}{ + "min": metrics.Latencies.Min.Seconds(), + "mean": metrics.Latencies.Mean.Seconds(), + "p50": metrics.Latencies.P50.Seconds(), + "p90": metrics.Latencies.P90.Seconds(), + "p95": metrics.Latencies.P95.Seconds(), + "p99": metrics.Latencies.P99.Seconds(), + "max": metrics.Latencies.Max.Seconds(), + }, + "status_codes": metrics.StatusCodes, + "errors": metrics.Errors, + } + + return report, nil +} + +// generateHdrPlot generates HDR histogram plot data +func (tr *TestReport) generateHdrPlot(binaryFile string) (string, error) { + // Read the binary file + file, err := os.Open(binaryFile) + if err != nil { + return "", err + } + defer func(file *os.File) { + err := file.Close() + if err != nil { + log.Printf("Warning: failed to close file: %v", err) + } + }(file) + + // Decode results + dec := vegeta.NewDecoder(file) + + // Create metrics + var metrics vegeta.Metrics + for { + var result vegeta.Result + if err := dec.Decode(&result); err != nil { + if err == io.EOF { + break + } + 
return "", err + } + metrics.Add(&result) + } + metrics.Close() + + // Generate HDR histogram + var buf bytes.Buffer + histogram := metrics.Histogram + if histogram != nil { + // Print histogram data + for i, bucket := range histogram.Buckets { + _, err := fmt.Fprintf(&buf, "%.6f %d\n", float64(bucket), histogram.Counts[i]) + if err != nil { + return "", err + } + } + } + + return buf.String(), nil +} + +// Close finalises and closes the test report +func (tr *TestReport) Close() error { + // Flush and close the CSV file + if tr.csvWriter != nil { + tr.csvWriter.Flush() + if err := tr.csvWriter.Error(); err != nil { + log.Printf("CSV writer error: %v", err) + } + } + + if tr.csvFile != nil { + if err := tr.csvFile.Close(); err != nil { + return fmt.Errorf("failed to close CSV file: %w", err) + } + } + + // Write the JSON report if enabled + if tr.config.JSONReportFile != "" && tr.jsonReport != nil { + fmt.Printf("Create json file: %s\n", tr.config.JSONReportFile) + + jsonData, err := json.MarshalIndent(tr.jsonReport, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal JSON report: %w", err) + } + + if err := os.WriteFile(tr.config.JSONReportFile, jsonData, 0644); err != nil { + return fmt.Errorf("failed to write JSON report: %w", err) + } + } + + return nil +} + +// Part 6: CLI and main function + +func main() { + app := &cli.App{ + Name: "rpc_perf", + Usage: "Launch an automated sequence of RPC performance tests on on target blockchain node(s)", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "not-verify-server-alive", + Aliases: []string{"Z"}, + Usage: "Don't verify server is still active", + }, + &cli.BoolFlag{ + Name: "tmp-test-report", + Aliases: []string{"R"}, + Usage: "Generate report in tmp directory", + }, + &cli.BoolFlag{ + Name: "test-report", + Aliases: []string{"u"}, + Usage: "Generate report in reports area ready for Git repo", + }, + &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + Usage: "Enable verbose output", + }, + 
&cli.BoolFlag{ + Name: "tracing", + Aliases: []string{"x"}, + Usage: "Enable verbose and tracing output", + }, + &cli.BoolFlag{ + Name: "empty-cache", + Aliases: []string{"e"}, + Usage: "Empty OS cache before each test", + }, + &cli.StringFlag{ + Name: "max-connections", + Aliases: []string{"C"}, + Value: DefaultMaxConn, + Usage: "Maximum number of connections", + }, + &cli.StringFlag{ + Name: "testing-daemon", + Aliases: []string{"D"}, + Usage: "Name of testing daemon", + }, + &cli.StringFlag{ + Name: "blockchain", + Aliases: []string{"b"}, + Value: "mainnet", + Usage: "Blockchain network name", + }, + &cli.StringFlag{ + Name: "test-type", + Aliases: []string{"y"}, + Value: DefaultTestType, + Usage: "Test type (e.g., eth_call, eth_getLogs)", + }, + &cli.StringFlag{ + Name: "test-mode", + Aliases: []string{"m"}, + Value: DefaultTestMode, + Usage: "Test mode: silkworm(1), erigon(2), both(3)", + }, + &cli.StringFlag{ + Name: "pattern-file", + Aliases: []string{"p"}, + Value: DefaultVegetaPatternTarFile, + Usage: "Path to the Vegeta attack pattern file", + }, + &cli.IntFlag{ + Name: "repetitions", + Aliases: []string{"r"}, + Value: DefaultRepetitions, + Usage: "Number of repetitions for each test in sequence", + }, + &cli.StringFlag{ + Name: "test-sequence", + Aliases: []string{"t"}, + Value: DefaultTestSequence, + Usage: "Test sequence as qps:duration,... 
(e.g., 200:30,400:10)", + }, + &cli.IntFlag{ + Name: "wait-after-test-sequence", + Aliases: []string{"w"}, + Value: DefaultWaitingTime, + Usage: "Wait time between test iterations in seconds", + }, + &cli.StringFlag{ + Name: "rpc-daemon-address", + Aliases: []string{"d"}, + Value: DefaultErigonAddress, + Usage: "RPC daemon address (e.g., 192.2.3.1)", + }, + &cli.StringFlag{ + Name: "erigon-dir", + Aliases: []string{"g"}, + Value: DefaultErigonBuildDir, + Usage: "Path to Erigon folder", + }, + &cli.StringFlag{ + Name: "silk-dir", + Aliases: []string{"s"}, + Value: DefaultSilkwormBuildDir, + Usage: "Path to Silkworm folder", + }, + &cli.StringFlag{ + Name: "run-vegeta-on-core", + Aliases: []string{"c"}, + Value: DefaultDaemonVegetaOnCore, + Usage: "Taskset format for Vegeta (e.g., 0-1:2-3)", + }, + &cli.StringFlag{ + Name: "response-timeout", + Aliases: []string{"T"}, + Value: DefaultVegetaResponseTimeout, + Usage: "Vegeta response timeout", + }, + &cli.StringFlag{ + Name: "max-body-rsp", + Aliases: []string{"M"}, + Value: DefaultMaxBodyRsp, + Usage: "Max bytes to read from response bodies", + }, + &cli.StringFlag{ + Name: "json-report", + Aliases: []string{"j"}, + Usage: "Generate JSON report at specified path", + }, + &cli.BoolFlag{ + Name: "more-percentiles", + Aliases: []string{"P"}, + Usage: "Print more percentiles in console report", + }, + &cli.BoolFlag{ + Name: "halt-on-vegeta-error", + Aliases: []string{"H"}, + Usage: "Consider test failed if Vegeta reports any error", + }, + &cli.BoolFlag{ + Name: "instant-report", + Aliases: []string{"I"}, + Usage: "Print instant Vegeta report for each test", + }, + }, + Action: runPerfTests, + } + + if err := app.Run(os.Args); err != nil { + log.Fatal(err) + } +} + +func runPerfTests(c *cli.Context) error { + fmt.Println("Performance Test started") + + // Create configuration from CLI flags + config := NewConfig() + + config.CheckServerAlive = !c.Bool("not-verify-server-alive") + config.CreateTestReport = 
c.Bool("tmp-test-report") || c.Bool("test-report") + config.VersionedTestReport = c.Bool("test-report") + config.Verbose = c.Bool("verbose") || c.Bool("tracing") + config.Tracing = c.Bool("tracing") + config.EmptyCache = c.Bool("empty-cache") + + config.MaxConnection = c.String("max-connections") + config.TestingDaemon = c.String("testing-daemon") + config.ChainName = c.String("blockchain") + config.TestType = c.String("test-type") + config.TestMode = c.String("test-mode") + config.VegetaPatternTarFile = c.String("pattern-file") + config.Repetitions = c.Int("repetitions") + config.TestSequence = c.String("test-sequence") + config.WaitingTime = c.Int("wait-after-test-sequence") + config.RPCDaemonAddress = c.String("rpc-daemon-address") + config.ErigonDir = c.String("erigon-dir") + config.SilkwormDir = c.String("silk-dir") + config.DaemonVegetaOnCore = c.String("run-vegeta-on-core") + config.VegetaResponseTimeout = c.String("response-timeout") + config.MaxBodyRsp = c.String("max-body-rsp") + config.JSONReportFile = c.String("json-report") + config.MorePercentiles = c.Bool("more-percentiles") + config.HaltOnVegetaError = c.Bool("halt-on-vegeta-error") + config.InstantReport = c.Bool("instant-report") + + // Validate configuration + if err := config.Validate(); err != nil { + return fmt.Errorf("configuration validation failed: %w", err) + } + + // Parse test sequence + sequence, err := ParseTestSequence(config.TestSequence) + if err != nil { + return fmt.Errorf("failed to parse test sequence: %w", err) + } + + // Create the test report + testReport := NewTestReport(config) + + // Create the performance test + perfTest, err := NewPerfTest(config, testReport) + if err != nil { + return fmt.Errorf("failed to initialize performance test: %w", err) + } + defer func(perfTest *PerfTest, initial bool) { + err := perfTest.Cleanup(initial) + if err != nil { + log.Printf("Failed to cleanup: %v", err) + } + }(perfTest, false) + + // Print test configuration + fmt.Printf("Test 
repetitions: %d on sequence: %s for pattern: %s\n", + config.Repetitions, config.TestSequence, config.VegetaPatternTarFile) + + // Open the test report if needed + if config.CreateTestReport { + if err := testReport.Open(); err != nil { + return fmt.Errorf("failed to open test report: %w", err) + } + defer func(testReport *TestReport) { + err := testReport.Close() + if err != nil { + log.Printf("Failed to close test report: %v", err) + } + }(testReport) + } + + // Create context + ctx := context.Background() + + // Run tests based on test mode + if config.TestMode == "1" || config.TestMode == "3" { + fmt.Println("Testing Silkworm...") + if err := perfTest.ExecuteSequence(ctx, sequence, Silkworm); err != nil { + fmt.Printf("Server dead test Aborted! Error: %v\n", err) + return err + } + + if config.TestMode == "3" { + fmt.Println("--------------------------------------------------------------------------------------------") + } + } + + if config.TestMode == "2" || config.TestMode == "3" { + fmt.Println("Testing Erigon...") + if err := perfTest.ExecuteSequence(ctx, sequence, Erigon); err != nil { + fmt.Printf("Server dead test Aborted! 
Error: %v\n", err) + return err + } + } + + fmt.Println("Performance Test completed successfully.") + return nil +} From 02cd75625d0c8ac8075ad7bcb475f15ed743f52d Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Tue, 9 Dec 2025 08:12:23 +0100 Subject: [PATCH 06/87] integration: v2 migration to go --- cmd/perf/main.go | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/cmd/perf/main.go b/cmd/perf/main.go index 1bbd0e2b..7258abef 100644 --- a/cmd/perf/main.go +++ b/cmd/perf/main.go @@ -975,13 +975,8 @@ func (pt *PerfTest) ExecuteSequence(ctx context.Context, sequence []TestSequence for _, test := range sequence { for rep := 0; rep < pt.config.Repetitions; rep++ { if test.QPS > 0 { - err := pt.Execute(ctx, testNumber, rep, tag, - strconv.Itoa(test.QPS), - strconv.Itoa(test.Duration), - resultFormat) - + err := pt.Execute(ctx, testNumber, rep, tag, strconv.Itoa(test.QPS), strconv.Itoa(test.Duration), resultFormat) if err != nil { - fmt.Printf("Server dead test Aborted! 
Error: %v\n", err) return err } } else { @@ -1053,25 +1048,35 @@ func (pt *PerfTest) processResults(testNumber, repetition int, daemonName, qpsVa errorMap[err]++ } + const MaxErrorsToDisplay = 1 + errorsToDisplay := 0 for errStr, count := range errorMap { + if errorsToDisplay >= MaxErrorsToDisplay { + break + } if errorMsg != "" { errorMsg += "; " } errorMsg += fmt.Sprintf("%s (x%d)", errStr, count) + errorsToDisplay++ + } + if errorsToDisplay < len(errorMap) { + errorMsg += fmt.Sprintf(" (+%d more)", len(errorMap)-errorsToDisplay) } } // Print results - if errorMsg != "" { - fmt.Printf("=%7s lat=[max=%8s] error=%s\n", successRatio, maxLatency, errorMsg) + var resultRecord string + if pt.config.MorePercentiles { + resultRecord = fmt.Sprintf("success=%7s lat=[p50=%8s p90=%8s p95=%8s p99=%8s max=%8s]", + successRatio, p50, p90, p95, p99, maxLatency) } else { - if pt.config.MorePercentiles { - fmt.Printf("success=%7s lat=[p50=%8s p90=%8s p95=%8s p99=%8s max=%8s]\n", - successRatio, p50, p90, p95, p99, maxLatency) - } else { - fmt.Printf("success=%7s lat=[max=%8s]\n", successRatio, maxLatency) - } + resultRecord = fmt.Sprintf("success=%7s lat=[max=%8s]", successRatio, maxLatency) + } + if errorMsg != "" { + resultRecord += fmt.Sprintf(" error=%s", errorMsg) } + fmt.Println(resultRecord) // Check for failures if errorMsg != "" && pt.config.HaltOnVegetaError { @@ -1925,7 +1930,7 @@ func runPerfTests(c *cli.Context) error { if config.TestMode == "1" || config.TestMode == "3" { fmt.Println("Testing Silkworm...") if err := perfTest.ExecuteSequence(ctx, sequence, Silkworm); err != nil { - fmt.Printf("Server dead test Aborted! Error: %v\n", err) + fmt.Printf("Performance Test failed, error: %v\n", err) return err } @@ -1937,7 +1942,7 @@ func runPerfTests(c *cli.Context) error { if config.TestMode == "2" || config.TestMode == "3" { fmt.Println("Testing Erigon...") if err := perfTest.ExecuteSequence(ctx, sequence, Erigon); err != nil { - fmt.Printf("Server dead test Aborted! 
Error: %v\n", err) + fmt.Printf("Performance Test failed, error: %v\n", err) return err } } From b00952dee890838949713fda315d6a1d7a8f4b46 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Tue, 9 Dec 2025 08:56:58 +0100 Subject: [PATCH 07/87] integration: v2 migration to go --- cmd/perf/main.go | 164 ++++++----------------------------------------- 1 file changed, 21 insertions(+), 143 deletions(-) diff --git a/cmd/perf/main.go b/cmd/perf/main.go index 7258abef..0c65944f 100644 --- a/cmd/perf/main.go +++ b/cmd/perf/main.go @@ -227,8 +227,8 @@ type TestMetrics struct { DaemonName string TestNumber int Repetition int - QPS string - Duration string + QPS int + Duration int MinLatency string Mean string P50 string @@ -275,8 +275,8 @@ type ConfigurationInfo struct { // TestResult holds results for a single QPS/duration test type TestResult struct { - QPS string `json:"qps"` - Duration string `json:"duration"` + QPS int `json:"qps"` + Duration int `json:"duration"` TestRepetitions []RepetitionInfo `json:"testRepetitions"` } @@ -287,8 +287,6 @@ type RepetitionInfo struct { VegetaReportHdrPlot string `json:"vegetaReportHdrPlot"` } -// Part 2: Hardware utilities and helper functions - // Hardware provides methods to extract hardware information type Hardware struct{} @@ -508,8 +506,6 @@ func ParseLatency(latency string) string { return strings.TrimSpace(latency) } -// Part 3: PerfTest implementation - // PerfTest manages performance test execution type PerfTest struct { config *Config @@ -727,7 +723,7 @@ func (pt *PerfTest) replaceInFile(filepath, old, new string) error { } // Execute runs a single performance test -func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, name, qpsValue, duration string, format ResultFormat) error { +func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, name string, qps, duration int, format ResultFormat) error { // Empty cache if configured if 
pt.config.EmptyCache { if err := EmptyCache(); err != nil { @@ -745,12 +741,12 @@ func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, nam // Create the binary file name timestamp := time.Now().Format("20060102150405") - pt.config.BinaryFile = fmt.Sprintf("%s_%s_%s_%s_%s_%s_%d.bin", + pt.config.BinaryFile = fmt.Sprintf("%s_%s_%s_%s_%d_%d_%d.bin", timestamp, pt.config.ChainName, pt.config.TestingDaemon, pt.config.TestType, - qpsValue, + qps, duration, repetition+1) @@ -773,11 +769,11 @@ func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, nam maxQpsDigits := strconv.Itoa(format.maxQpsDigits) maxDurationDigits := strconv.Itoa(format.maxDurationDigits) if pt.config.TestingDaemon != "" { - fmt.Printf("[%d.%"+maxRepetitionDigits+"d] %s: executes test qps: %"+maxQpsDigits+"s time: %"+maxDurationDigits+"s -> ", - testNumber, repetition+1, pt.config.TestingDaemon, qpsValue, duration) + fmt.Printf("[%d.%"+maxRepetitionDigits+"d] %s: executes test qps: %"+maxQpsDigits+"d time: %"+maxDurationDigits+"d -> ", + testNumber, repetition+1, pt.config.TestingDaemon, qps, duration) } else { - fmt.Printf("[%d.%"+maxRepetitionDigits+"d] daemon: executes test qps: %"+maxQpsDigits+"s time: %"+maxDurationDigits+"s -> ", - testNumber, repetition+1, qpsValue, duration) + fmt.Printf("[%d.%"+maxRepetitionDigits+"d] daemon: executes test qps: %"+maxQpsDigits+"d time: %"+maxDurationDigits+"d -> ", + testNumber, repetition+1, qps, duration) } // Load targets from pattern file @@ -786,19 +782,8 @@ func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, nam return fmt.Errorf("failed to load targets: %w", err) } - // Parse QPS and duration - qps, err := strconv.Atoi(qpsValue) - if err != nil { - return fmt.Errorf("invalid QPS value: %w", err) - } - - dur, err := strconv.Atoi(duration) - if err != nil { - return fmt.Errorf("invalid duration value: %w", err) - } - // Run vegeta attack - metrics, err := pt.runVegetaAttack(ctx, 
targets, qps, time.Duration(dur)*time.Second, pt.config.BinaryFileFullPathname) + metrics, err := pt.runVegetaAttack(ctx, targets, qps, time.Duration(duration)*time.Second, pt.config.BinaryFileFullPathname) if err != nil { return fmt.Errorf("vegeta attack failed: %w", err) } @@ -819,7 +804,7 @@ func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, nam } // Process results - return pt.processResults(testNumber, repetition, name, qpsValue, duration, metrics) + return pt.processResults(testNumber, repetition, name, qps, duration, metrics) } // loadTargets loads Vegeta targets from a pattern file @@ -975,7 +960,7 @@ func (pt *PerfTest) ExecuteSequence(ctx context.Context, sequence []TestSequence for _, test := range sequence { for rep := 0; rep < pt.config.Repetitions; rep++ { if test.QPS > 0 { - err := pt.Execute(ctx, testNumber, rep, tag, strconv.Itoa(test.QPS), strconv.Itoa(test.Duration), resultFormat) + err := pt.Execute(ctx, testNumber, rep, tag, test.QPS, test.Duration, resultFormat) if err != nil { return err } @@ -1023,10 +1008,8 @@ type ResultFormat struct { maxRepetitionDigits, maxQpsDigits, maxDurationDigits int } -// Part 4: Results processing - // processResults processes the vegeta metrics and generates reports -func (pt *PerfTest) processResults(testNumber, repetition int, daemonName, qpsValue, duration string, metrics *vegeta.Metrics) error { +func (pt *PerfTest) processResults(testNumber, repetition int, name string, qps, duration int, metrics *vegeta.Metrics) error { // Extract latency values minLatency := FormatDuration(metrics.Latencies.Min) mean := FormatDuration(metrics.Latencies.Mean) @@ -1090,10 +1073,10 @@ func (pt *PerfTest) processResults(testNumber, repetition int, daemonName, qpsVa // Write to the test report if enabled if pt.config.CreateTestReport { testMetrics := &TestMetrics{ - DaemonName: daemonName, + DaemonName: name, TestNumber: testNumber, Repetition: repetition, - QPS: qpsValue, + QPS: qps, Duration: 
duration, MinLatency: minLatency, Mean: mean, @@ -1157,109 +1140,6 @@ func (pt *PerfTest) printInstantReport(metrics *vegeta.Metrics) { fmt.Print("========================\n\n") } -// generateHdrPlot generates HDR histogram plot data -func (pt *PerfTest) generateHdrPlot(binaryFile string) (string, error) { - // Read the binary file - file, err := os.Open(binaryFile) - if err != nil { - return "", err - } - defer func(file *os.File) { - err := file.Close() - if err != nil { - log.Printf("Warning: failed to close file: %v", err) - } - }(file) - - // Decode results - dec := vegeta.NewDecoder(file) - - // Create metrics - var metrics vegeta.Metrics - for { - var result vegeta.Result - if err := dec.Decode(&result); err != nil { - if err == io.EOF { - break - } - return "", err - } - metrics.Add(&result) - } - metrics.Close() - - // Generate HDR histogram - var buf bytes.Buffer - histogram := metrics.Histogram - if histogram != nil { - // Print histogram data - for i, bucket := range histogram.Buckets { - _, err := fmt.Fprintf(&buf, "%.6f %d\n", float64(bucket), histogram.Counts[i]) - if err != nil { - return "", err - } - } - } - - return buf.String(), nil -} - -// generateJSONReport generates a JSON report from the binary file -func (pt *PerfTest) generateJSONReport(binaryFile string) (map[string]interface{}, error) { - // Read the binary file - file, err := os.Open(binaryFile) - if err != nil { - return nil, err - } - defer func(file *os.File) { - err := file.Close() - if err != nil { - log.Printf("Warning: failed to close file: %v", err) - } - }(file) - - // Decode results - dec := vegeta.NewDecoder(file) - - // Create metrics - var metrics vegeta.Metrics - for { - var result vegeta.Result - if err := dec.Decode(&result); err != nil { - if err == io.EOF { - break - } - return nil, err - } - metrics.Add(&result) - } - metrics.Close() - - // Convert metrics to map - report := map[string]interface{}{ - "requests": metrics.Requests, - "duration": 
metrics.Duration.Seconds(), - "rate": metrics.Rate, - "throughput": metrics.Throughput, - "success": metrics.Success, - "latencies": map[string]interface{}{ - "min": metrics.Latencies.Min.Seconds(), - "mean": metrics.Latencies.Mean.Seconds(), - "p50": metrics.Latencies.P50.Seconds(), - "p90": metrics.Latencies.P90.Seconds(), - "p95": metrics.Latencies.P95.Seconds(), - "p99": metrics.Latencies.P99.Seconds(), - "max": metrics.Latencies.Max.Seconds(), - }, - "status_codes": metrics.StatusCodes, - "errors": metrics.Errors, - } - - return report, nil -} - -// Part 5: TestReport implementation - // TestReport manages CSV and JSON report generation type TestReport struct { config *Config @@ -1489,8 +1369,8 @@ func (tr *TestReport) WriteTestReport(metrics *TestMetrics) error { metrics.DaemonName, strconv.Itoa(metrics.TestNumber), strconv.Itoa(metrics.Repetition), - metrics.QPS, - metrics.Duration, + strconv.Itoa(metrics.QPS), + strconv.Itoa(metrics.Duration), metrics.MinLatency, metrics.Mean, metrics.P50, @@ -1523,8 +1403,8 @@ func (tr *TestReport) writeTestReportToJSON(metrics *TestMetrics) error { if metrics.Repetition == 0 { tr.currentTestIdx++ tr.jsonReport.Results = append(tr.jsonReport.Results, TestResult{ - QPS: strings.TrimSpace(metrics.QPS), - Duration: strings.TrimSpace(metrics.Duration), + QPS: metrics.QPS, + Duration: metrics.Duration, TestRepetitions: []RepetitionInfo{}, }) } @@ -1692,8 +1572,6 @@ func (tr *TestReport) Close() error { return nil } -// Part 6: CLI and main function - func main() { app := &cli.App{ Name: "rpc_perf", From 2861c494733f2bdd8e5efb38d6303fdef827895e Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Thu, 11 Dec 2025 10:36:58 +0100 Subject: [PATCH 08/87] integration: proper HTTP error handling --- cmd/integration/main.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index c3f6194f..881a5e4a 100644 --- 
a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1165,11 +1165,12 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t } }(resp.Body) - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { if verboseLevel > 1 { fmt.Printf("\npost result status_code: %d\n", resp.StatusCode) } - return nil, err + // TODO: add option to ignore HTTP errors and continue? + return nil, fmt.Errorf("failed: http status %v", resp.Status) } body, err := io.ReadAll(resp.Body) From 86a1ea87b48dec7253abc2200a210ac3959c2c3c Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 12 Dec 2025 10:47:09 +0100 Subject: [PATCH 09/87] integration: better JSONRPC parsing integration: better error handling --- cmd/integration/main.go | 117 +++++++++++++++++++++++++++++++++++----- 1 file changed, 103 insertions(+), 14 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 881a5e4a..5d3fb4b8 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1114,6 +1114,86 @@ func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response return nil } +const ( + identifierTag = "id" + jsonRpcTag = "jsonrpc" + resultTag = "result" + errorTag = "error" +) + +var ( + errJsonRpcUnexpectedFormat = errors.New("invalid JSON-RPC response format: neither object nor array") + errJsonRpcMissingVersion = errors.New("invalid JSON-RPC response: missing 'jsonrpc' field") + errJsonRpcMissingId = errors.New("invalid JSON-RPC response: missing 'id' field") + errJsonRpcNoncompliantVersion = errors.New("noncompliant JSON-RPC 2.0 version") + errJsonRpcMissingResultOrError = errors.New("JSON-RPC 2.0 response contains neither 'result' nor 'error'") + errJsonRpcContainsBothResultAndError = errors.New("JSON-RPC 2.0 response contains both 'result' and 'error'") +) + +// validateJsonRpcObject checks that the received response is a valid JSON-RPC object, according to 2.0 spec. 
+// This implies that the response must be a JSON object containing: +// - one mandatory "jsonrpc" field which must be equal to "2.0" +// - one mandatory "id" field which must match the value of the same field in the request +// - either one "result" field in case of success or one "error" field otherwise, mutually exclusive +// The strict parameter relaxes the compliance requirements by allowing both 'result' and 'error' to be present +// TODO: strict parameter is required for corner cases in streaming mode when 'result' is emitted up-front +// https://www.jsonrpc.org/specification +func validateJsonRpcObject(response map[string]any, strict bool) error { + // Ensure that the response is a valid JSON-RPC object. + jsonrpc, ok := response[jsonRpcTag] + if !ok { + return errJsonRpcMissingVersion + } + jsonrpcVersion, ok := jsonrpc.(string) + if jsonrpcVersion != "2.0" { + return errJsonRpcNoncompliantVersion + } + _, ok = response[identifierTag] + if !ok { + return errJsonRpcMissingId + } + _, hasResult := response[resultTag] + _, hasError := response[errorTag] + if !hasResult && !hasError { + return errJsonRpcMissingResultOrError + } + if strict && hasResult && hasError { + return errJsonRpcContainsBothResultAndError + } + return nil +} + +// validateJsonRpcResponse checks that the received response is a valid JSON-RPC message, according to 2.0 spec. +// This implies that the response must be either a valid JSON-RPC object, i.e. a JSON object containing at least +// "jsonrpc" and "id" fields or a JSON array where each element (if any) is in turn a valid JSON-RPC object. +func validateJsonRpcResponse(response any) error { + _, isArray := response.([]any) + responseAsMap, isMap := response.(map[string]any) + if !isArray && !isMap { + return errJsonRpcUnexpectedFormat + } + if isMap { + // Ensure that the response is a valid JSON-RPC object. 
+ err := validateJsonRpcObject(responseAsMap, false) + if err != nil { + return err + } + } + if isArray { + for _, element := range response.([]any) { + elementAsMap, isElementMap := element.(map[string]any) + if !isElementMap { + return errJsonRpcUnexpectedFormat + } + err := validateJsonRpcObject(elementAsMap, false) + if err != nil { + return err + } + } + } + return nil +} + func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, target string, verboseLevel int) (any, error) { if transportType == "http" || transportType == "http_comp" || transportType == "https" { headers := map[string]string{ @@ -1169,8 +1249,8 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t if verboseLevel > 1 { fmt.Printf("\npost result status_code: %d\n", resp.StatusCode) } - // TODO: add option to ignore HTTP errors and continue? - return nil, fmt.Errorf("failed: http status %v", resp.Status) + // TODO: add option to stop on any HTTP error? + return nil, fmt.Errorf("http status %v", resp.Status) } body, err := io.ReadAll(resp.Body) @@ -1182,7 +1262,7 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t } if verboseLevel > 1 { - fmt.Printf("\npost result content: %s\n", string(body)) + fmt.Printf("\nhttp response body: %s\n", string(body)) } var result any @@ -1192,11 +1272,13 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t } return nil, err } + err = validateJsonRpcResponse(result) + if err != nil { + return nil, err + } if verboseLevel > 1 { - fmt.Printf("\ntarget: %s\n", target) - fmt.Printf("%s\n", requestDumps) - fmt.Printf("Response: %v\n", result) + fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, requestDumps, result) } return result, nil @@ -1248,11 +1330,13 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t } return nil, err } + err = validateJsonRpcResponse(result) + if err != nil { + return nil, err + } if 
verboseLevel > 1 { - fmt.Printf("\ntarget: %s\n", target) - fmt.Printf("%s\n", requestDumps) - fmt.Printf("Response: %v\n", result) + fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, requestDumps, result) } return result, nil @@ -1415,6 +1499,11 @@ func copyFile(src, dst string) (int64, error) { return nBytes, err } +var ( + errDiffTimeout = errors.New("diff timeout") + errDiffMismatch = errors.New("diff mismatch") +) + func compareJSON(config *Config, response interface{}, jsonFile, daemonFile, expRspFile, diffFile string, testNumber int) (bool, error) { baseName := filepath.Join(TempDirname, fmt.Sprintf("test_%d", testNumber)) err := os.MkdirAll(baseName, 0755) @@ -1497,9 +1586,9 @@ func compareJSON(config *Config, response interface{}, jsonFile, daemonFile, exp if diffFileSize != 0 || !diffResult { if !diffResult { - err = errors.New("failed timeout") + err = errDiffTimeout } else { - err = errors.New("failed") + err = errDiffMismatch } return false, err } @@ -1534,11 +1623,11 @@ func processResponse(target, target1 string, result, result1 interface{}, respon } if response == nil { - return false, errors.New("failed [" + config.DaemonUnderTest + "] (server doesn't respond)") + return false, errors.New("[" + config.DaemonUnderTest + "] (server doesn't respond)") } if expectedResponse == nil { - return false, errors.New("failed [" + config.DaemonAsReference + "] (server doesn't respond)") + return false, errors.New("[" + config.DaemonAsReference + "] (server doesn't respond)") } // Deep comparison @@ -1877,7 +1966,7 @@ func main() { } } else { failedTests++ - fmt.Printf("%s\n", result.Error.Error()) + fmt.Printf("failed: %s\n", result.Error.Error()) if config.ExitOnFail { // Signal other tasks to stop and exit cancelCtx() From 75e8de9b62d4436b199f0e7b9ccba713c39c53d8 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 12 Dec 2025 11:14:53 +0100 Subject: [PATCH 10/87] integration: improve response comparison --- 
cmd/integration/main.go | 117 ++++++++++++++++++++-------------------- 1 file changed, 59 insertions(+), 58 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 5d3fb4b8..6b6a0ba1 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1630,95 +1630,96 @@ func processResponse(target, target1 string, result, result1 interface{}, respon return false, errors.New("[" + config.DaemonAsReference + "] (server doesn't respond)") } - // Deep comparison + // Deep comparison between the received response and the expected response respJSON, _ := json.Marshal(response) expJSON, _ := json.Marshal(expectedResponse) - if string(respJSON) != string(expJSON) { - responseMap, respIsMap := response.(map[string]interface{}) - expectedMap, expIsMap := expectedResponse.(map[string]interface{}) - - // Check various conditions where we don't care about differences - if respIsMap && expIsMap { - _, responseHasResult := responseMap["result"] - expectedResult, expectedHasResult := expectedMap["result"] - _, responseHasError := responseMap["error"] - expectedError, expectedHasError := expectedMap["error"] - if responseHasResult && expectedHasResult && expectedResult == nil && result1 == nil { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) - if err != nil { - return false, err - } - return true, nil - } - if responseHasError && expectedHasError && expectedError == nil { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) - if err != nil { - return false, err - } - return true, nil - } - // TODO: improve len(expectedMap) == 2 which means: just "jsonrpc" and "id" are expected - if !expectedHasResult && !expectedHasError && len(expectedMap) == 2 { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) - if err != nil { - return false, err - } - return true, nil - } - if responseHasError && 
expectedHasError && config.DoNotCompareError { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) - if err != nil { - return false, err - } - return true, nil - } - } - - err := dumpJSONs(true, daemonFile, expRspFile, outputDir, response, expectedResponse) + // Fast path: if actual/expected are identical byte-wise, no need to compare them + if bytes.Equal(respJSON, expJSON) { + err = dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { return false, err } + return true, nil + } - same, err := compareJSON(config, response, jsonFile, daemonFile, expRspFile, diffFile, testNumber) - if err != nil { - return same, err - } - if same { - err := os.Remove(daemonFile) + // Check various conditions where we don't care about differences + responseMap, respIsMap := response.(map[string]interface{}) + expectedMap, expIsMap := expectedResponse.(map[string]interface{}) + + if respIsMap && expIsMap { + _, responseHasResult := responseMap["result"] + expectedResult, expectedHasResult := expectedMap["result"] + _, responseHasError := responseMap["error"] + expectedError, expectedHasError := expectedMap["error"] + if responseHasResult && expectedHasResult && expectedResult == nil && result1 == nil { + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { return false, err } - err = os.Remove(expRspFile) + return true, nil + } + if responseHasError && expectedHasError && expectedError == nil { + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { return false, err } - err = os.Remove(diffFile) + return true, nil + } + // TODO: improve len(expectedMap) == 2 which means: just "jsonrpc" and "id" are expected + if !expectedHasResult && !expectedHasError && len(expectedMap) == 2 { + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, 
outputDir, response, expectedResponse) if err != nil { return false, err } + return true, nil } - - // Try to remove the output directory if empty - if entries, err := os.ReadDir(outputDir); err == nil && len(entries) == 0 { - err := os.Remove(outputDir) + if responseHasError && expectedHasError && config.DoNotCompareError { + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { return false, err } + return true, nil } + } - err = dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + err = dumpJSONs(true, daemonFile, expRspFile, outputDir, response, expectedResponse) + if err != nil { + return false, err + } + + same, err := compareJSON(config, response, jsonFile, daemonFile, expRspFile, diffFile, testNumber) + if err != nil { + return same, err + } + if same { + err := os.Remove(daemonFile) + if err != nil { + return false, err + } + err = os.Remove(expRspFile) + if err != nil { + return false, err + } + err = os.Remove(diffFile) + if err != nil { + return false, err + } + } + + // Try to remove the output directory if empty + if entries, err := os.ReadDir(outputDir); err == nil && len(entries) == 0 { + err := os.Remove(outputDir) if err != nil { return false, err } - return same, nil } err = dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { return false, err } - return true, nil + return same, nil } func isArchive(jsonFilename string) bool { From bafc20e9a9a811efd34c4699c8f3ced8dd93933f Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 12 Dec 2025 16:44:40 +0100 Subject: [PATCH 11/87] integration: avoid concurrent cleanup of output dir integration: simplify response comparison --- cmd/integration/main.go | 86 ++++++++++++++++++++--------------------- 1 file changed, 41 insertions(+), 45 deletions(-) diff --git a/cmd/integration/main.go 
b/cmd/integration/main.go index 6b6a0ba1..1ad330d8 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1051,18 +1051,6 @@ func isNotComparedError(testName, net string) bool { return false } -func getJSONFromResponse(target, msg string, verboseLevel int, result interface{}) (interface{}, error) { - if verboseLevel > 2 { - fmt.Printf("%s: [%v]\n", msg, result) - } - - if result == nil { - return nil, errors.New("failed (json response is nil, maybe server is down) on " + target) - } - - return result, nil -} - func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response, expectedResponse interface{}) error { if !dumpJSON { return nil @@ -1596,20 +1584,12 @@ func compareJSON(config *Config, response interface{}, jsonFile, daemonFile, exp return true, nil } -func processResponse(target, target1 string, result, result1 interface{}, responseInFile interface{}, +func processResponse(response, result1 interface{}, responseInFile interface{}, config *Config, outputDir, daemonFile, expRspFile, diffFile, jsonFile string, testNumber int) (bool, error) { - response, err := getJSONFromResponse(target, config.DaemonUnderTest, config.VerboseLevel, result) - if err != nil { - return false, err - } - var expectedResponse interface{} if result1 != nil { - expectedResponse, err = getJSONFromResponse(target1, config.DaemonAsReference, config.VerboseLevel, result1) - if err != nil { - return false, err - } + expectedResponse = result1 } else { expectedResponse = responseInFile } @@ -1622,21 +1602,13 @@ func processResponse(target, target1 string, result, result1 interface{}, respon return true, nil } - if response == nil { - return false, errors.New("[" + config.DaemonUnderTest + "] (server doesn't respond)") - } - - if expectedResponse == nil { - return false, errors.New("[" + config.DaemonAsReference + "] (server doesn't respond)") - } - // Deep comparison between the received response and the expected response respJSON, _ := 
json.Marshal(response) expJSON, _ := json.Marshal(expectedResponse) // Fast path: if actual/expected are identical byte-wise, no need to compare them if bytes.Equal(respJSON, expJSON) { - err = dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { return false, err } @@ -1683,7 +1655,7 @@ func processResponse(target, target1 string, result, result1 interface{}, respon } } - err = dumpJSONs(true, daemonFile, expRspFile, outputDir, response, expectedResponse) + err := dumpJSONs(true, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { return false, err } @@ -1707,14 +1679,6 @@ func processResponse(target, target1 string, result, result1 interface{}, respon } } - // Try to remove the output directory if empty - if entries, err := os.ReadDir(outputDir); err == nil && len(entries) == 0 { - err := os.Remove(outputDir) - if err != nil { - return false, err - } - } - err = dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { return false, err @@ -1815,13 +1779,18 @@ func runTest(ctx context.Context, jsonFile string, testNumber int, transportType if err != nil { return false, err } - var result1 any - responseInFile := jsonRPC.Response + if config.VerboseLevel > 2 { + fmt.Printf("%s: [%v]\n", config.DaemonUnderTest, result) + } + if result == nil { + return false, errors.New("response is nil (maybe node at " + target + " is down?)") + } + responseInFile := jsonRPC.Response daemonFile := outputAPIFilename + "-response.json" expRspFile := outputAPIFilename + "-expResponse.json" - return processResponse(target, target1, result, result1, responseInFile, config, + return processResponse(result, nil, responseInFile, config, outputDirName, daemonFile, expRspFile, diffFile, jsonFile, testNumber) } else { target = 
getTarget(DaemonOnDefaultPort, method, config) @@ -1829,17 +1798,28 @@ func runTest(ctx context.Context, jsonFile string, testNumber int, transportType if err != nil { return false, err } + if config.VerboseLevel > 2 { + fmt.Printf("%s: [%v]\n", config.DaemonUnderTest, result) + } + if result == nil { + return false, errors.New("response is nil (maybe node at " + target + " is down?)") + } target1 = getTarget(config.DaemonAsReference, method, config) result1, err := executeRequest(ctx, transportType, jwtAuth, string(requestDumps), target1, config.VerboseLevel) if err != nil { return false, err } - var responseInFile any + if config.VerboseLevel > 2 { + fmt.Printf("%s: [%v]\n", config.DaemonAsReference, result1) + } + if result1 == nil { + return false, errors.New("response is nil (maybe node at " + target1 + " is down?)") + } daemonFile := outputAPIFilename + getJSONFilenameExt(DaemonOnDefaultPort, target) expRspFile := outputAPIFilename + getJSONFilenameExt(config.DaemonAsReference, target1) - return processResponse(target, target1, result, result1, responseInFile, config, + return processResponse(result, result1, nil, config, outputDirName, daemonFile, expRspFile, diffFile, jsonFile, testNumber) } } @@ -2111,6 +2091,22 @@ func main() { close(resultsChan) resultsWg.Wait() + // Clean empty subfolders in the output dir + if entries, err := os.ReadDir(config.OutputDir); err == nil { + for _, entry := range entries { + if !entry.IsDir() { + continue + } + outputSubfolder := filepath.Join(config.OutputDir, entry.Name()) + if subEntries, err := os.ReadDir(outputSubfolder); err == nil && len(subEntries) == 0 { + err := os.Remove(outputSubfolder) + if err != nil { + fmt.Printf("WARN: clean failed %v\n", err) + } + } + } + } + // Clean temp dir err = os.RemoveAll(TempDirname) if err != nil { From d193119415baeac402f95bcdfc74beeac00af57f Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Sat, 13 Dec 2025 11:08:32 +0100 Subject: [PATCH 
12/87] integration: handle graceful termination integration: make tasks fully async --- cmd/integration/main.go | 117 ++++++++++++++++++++++++++++++---------- 1 file changed, 89 insertions(+), 28 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 1ad330d8..bac7998a 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -16,12 +16,14 @@ import ( "net/http" "os" "os/exec" + "os/signal" "path/filepath" "regexp" "sort" "strconv" "strings" "sync" + "syscall" "time" bzip2w "github.com/dsnet/compress/bzip2" @@ -1843,7 +1845,7 @@ func main() { os.Exit(-1) } - // Clean temp dirs if exists + // Clean temp dirs if exists // TODO: use OS temp dir? if _, err := os.Stat(TempDirname); err == nil { err := os.RemoveAll(TempDirname) if err != nil { @@ -1857,6 +1859,7 @@ func main() { os.Exit(-1) } + scheduledTests := 0 executedTests := 0 failedTests := 0 successTests := 0 @@ -1897,8 +1900,8 @@ func main() { // Worker pool for parallel execution var wg sync.WaitGroup - testsChan := make(chan *TestDescriptor, 100) - resultsChan := make(chan chan TestResult, 100) + testsChan := make(chan *TestDescriptor, 10000) + resultsChan := make(chan chan TestResult, 10000) numWorkers := 1 if config.Parallel { @@ -1932,27 +1935,52 @@ func main() { resultsWg.Add(1) go func() { defer resultsWg.Done() - for testResultCh := range resultsChan { - result := <-testResultCh - file := fmt.Sprintf("%-60s", result.Test.Name) - tt := fmt.Sprintf("%-15s", result.Test.TransportType) - fmt.Printf("%04d. 
%s::%s ", result.Test.Number, tt, file) - - if result.Success { - successTests++ - if config.VerboseLevel > 0 { - fmt.Println("OK") - } else { - fmt.Print("OK\r") + for { + select { + case testResultCh := <-resultsChan: + if testResultCh == nil { + return } - } else { - failedTests++ - fmt.Printf("failed: %s\n", result.Error.Error()) - if config.ExitOnFail { - // Signal other tasks to stop and exit - cancelCtx() + select { + case result := <-testResultCh: + file := fmt.Sprintf("%-60s", result.Test.Name) + tt := fmt.Sprintf("%-15s", result.Test.TransportType) + fmt.Printf("%04d. %s::%s ", result.Test.Number, tt, file) + + if result.Success { + successTests++ + if config.VerboseLevel > 0 { + fmt.Println("OK") + } else { + fmt.Print("OK\r") + } + } else { + failedTests++ + fmt.Printf("failed: %s\n", result.Error.Error()) + if config.ExitOnFail { + // Signal other tasks to stop and exit + cancelCtx() + return + } + } + executedTests++ + case <-ctx.Done(): return } + case <-ctx.Done(): + return + } + } + }() + + go func() { + for { + select { + case sig := <-sigs: + fmt.Printf("\nReceived signal: %s. 
Starting graceful shutdown...\n", sig) + cancelCtx() + case <-ctx.Done(): + return } } }() @@ -1964,12 +1992,24 @@ func main() { }() for testRep = 0; testRep < config.LoopNumber; testRep++ { + select { + case <-ctx.Done(): + break + default: + } + if config.LoopNumber != 1 { fmt.Printf("\nTest iteration: %d\n", testRep+1) } transportTypes := strings.Split(config.TransportType, ",") for _, transportType := range transportTypes { + select { + case <-ctx.Done(): + break + default: + } + testNumberInAnyLoop := 1 dirs, err := os.ReadDir(config.JSONDir) @@ -1990,6 +2030,12 @@ func main() { availableTestedAPIs = 0 for _, currAPIEntry := range dirs { + select { + case <-ctx.Done(): + break + default: + } + currAPI := currAPIEntry.Name() // Skip results folder and hidden folders @@ -2017,6 +2063,12 @@ func main() { testNumber := 1 for _, testEntry := range testEntries { + select { + case <-ctx.Done(): + break + default: + } + testName := testEntry.Name() if !strings.HasPrefix(testName, "test_") { @@ -2057,9 +2109,17 @@ func main() { TransportType: transportType, ResultChan: make(chan TestResult, 1), } - resultsChan <- testDesc.ResultChan - testsChan <- testDesc - executedTests++ + select { + case <-ctx.Done(): + return + case resultsChan <- testDesc.ResultChan: + } + select { + case <-ctx.Done(): + return + case testsChan <- testDesc: + } + scheduledTests++ if config.WaitingTime > 0 { time.Sleep(time.Duration(config.WaitingTime) * time.Millisecond) @@ -2081,7 +2141,7 @@ func main() { } } - if executedTests == 0 && config.TestingAPIsWith != "" { + if scheduledTests == 0 && config.TestingAPIsWith != "" { fmt.Printf("WARN: API filter %s selected no tests\n", config.TestingAPIsWith) } @@ -2116,10 +2176,11 @@ func main() { // Print results elapsed := time.Since(startTime) fmt.Println("\n ") - fmt.Printf("Test time-elapsed: %v\n", elapsed) + fmt.Printf("Test suite duration: %v\n", elapsed) fmt.Printf("Available tests: %d\n", globalTestNumber-1) - fmt.Printf("Available tested api: 
%d\n", availableTestedAPIs) - fmt.Printf("Number of loop: %d\n", testRep) + fmt.Printf("Available endpoints: %d\n", availableTestedAPIs) + fmt.Printf("Number of loops: %d\n", testRep) + fmt.Printf("Number of scheduled tests: %d\n", scheduledTests) fmt.Printf("Number of executed tests: %d\n", executedTests) fmt.Printf("Number of NOT executed tests: %d\n", testsNotExecuted) fmt.Printf("Number of success tests: %d\n", successTests) From 94f03f3bd8618cb5e4c4a62f2b2b7dfefd0206dd Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Sat, 13 Dec 2025 14:32:45 +0100 Subject: [PATCH 13/87] integration: reorder result report --- cmd/integration/main.go | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index bac7998a..035bf2ab 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1838,6 +1838,11 @@ func mustAtoi(s string) int { } func main() { + // Create a channel to receive OS signals and register for clean termination signals. + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + // Parse command line arguments config := NewConfig() if err := config.parseFlags(); err != nil { _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err) @@ -1863,7 +1868,7 @@ func main() { executedTests := 0 failedTests := 0 successTests := 0 - testsNotExecuted := 0 + skippedTests := 0 var serverEndpoints string if config.VerifyWithDaemon { @@ -2090,7 +2095,7 @@ func main() { tt := fmt.Sprintf("%-15s", transportType) fmt.Printf("%04d. 
%s::%s skipped\n", testNumberInAnyLoop, tt, file) } - testsNotExecuted++ + skippedTests++ } } else { shouldRun := false @@ -2176,13 +2181,13 @@ func main() { // Print results elapsed := time.Since(startTime) fmt.Println("\n ") - fmt.Printf("Test suite duration: %v\n", elapsed) - fmt.Printf("Available tests: %d\n", globalTestNumber-1) - fmt.Printf("Available endpoints: %d\n", availableTestedAPIs) - fmt.Printf("Number of loops: %d\n", testRep) - fmt.Printf("Number of scheduled tests: %d\n", scheduledTests) + fmt.Printf("Test session duration: %v\n", elapsed) + fmt.Printf("Test session iterations: %d\n", testRep) + fmt.Printf("Test suite total APIs: %d\n", availableTestedAPIs) + fmt.Printf("Test suite total tests: %d\n", globalTestNumber) + fmt.Printf("Number of skipped tests: %d\n", skippedTests) + fmt.Printf("Number of selected tests: %d\n", scheduledTests) fmt.Printf("Number of executed tests: %d\n", executedTests) - fmt.Printf("Number of NOT executed tests: %d\n", testsNotExecuted) fmt.Printf("Number of success tests: %d\n", successTests) fmt.Printf("Number of failed tests: %d\n", failedTests) From 2ae834ec37a5359d91541f544cc912c73d10a10b Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Tue, 16 Dec 2025 13:14:50 +0100 Subject: [PATCH 14/87] integration: add profiling --- cmd/integration/main.go | 81 +++++++++++++++++++++++++++++++++++------ 1 file changed, 70 insertions(+), 11 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 035bf2ab..c7d30aa4 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -19,6 +19,9 @@ import ( "os/signal" "path/filepath" "regexp" + "runtime" + "runtime/pprof" + "runtime/trace" "sort" "strconv" "strings" @@ -513,6 +516,9 @@ type Config struct { TestsOnLatestBlock bool LocalServer string SanitizeArchiveExt bool + CpuProfile string + MemProfile string + TraceFile string } type TestResult struct { @@ -649,6 +655,10 @@ func (c *Config) parseFlags() error 
{ doNotCompareError := flag.Bool("E", false, "do not compare error") flag.BoolVar(doNotCompareError, "do-not-compare-error", false, "do not compare error") + cpuProfile := flag.String("cpuprofile", "", "write cpu profile to file") + memProfile := flag.String("memprofile", "", "write memory profile to file") + traceFile := flag.String("trace", "", "write execution trace to file") + flag.Parse() if *help { @@ -700,6 +710,9 @@ func (c *Config) parseFlags() error { c.DoNotCompareError = *doNotCompareError c.TestsOnLatestBlock = *testOnLatest c.Parallel = !*serial + c.CpuProfile = *cpuProfile + c.MemProfile = *memProfile + c.TraceFile = *traceFile if *daemonPort { c.DaemonUnderTest = DaemonOnOtherPort @@ -1837,7 +1850,7 @@ func mustAtoi(s string) int { return n } -func main() { +func runMain() int { // Create a channel to receive OS signals and register for clean termination signals. sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) @@ -1847,21 +1860,62 @@ func main() { if err := config.parseFlags(); err != nil { _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err) usage() - os.Exit(-1) + return -1 + } + + // Handle embedded CPU/memory profiling and execution tracing + if config.CpuProfile != "" { + f, err := os.Create(config.CpuProfile) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "could not create CPU profile: %v\n", err) + } + defer f.Close() + if err := pprof.StartCPUProfile(f); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "could not start CPU profile: %v\n", err) + } + defer pprof.StopCPUProfile() + } + + // Execution trace + if config.TraceFile != "" { + f, err := os.Create(config.TraceFile) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "could not create trace file: %v\n", err) + } + defer f.Close() + if err := trace.Start(f); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "could not start trace: %v\n", err) + } + defer trace.Stop() } + // Memory profiling at end + defer func() { + if config.MemProfile != "" { + f, err := 
os.Create(config.MemProfile) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "could not create memory profile: %v\n", err) + } + defer f.Close() + runtime.GC() // get up-to-date statistics + if err := pprof.WriteHeapProfile(f); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "could not write memory profile: %v\n", err) + } + } + }() + // Clean temp dirs if exists // TODO: use OS temp dir? if _, err := os.Stat(TempDirname); err == nil { err := os.RemoveAll(TempDirname) if err != nil { - os.Exit(-1) + return -1 } } startTime := time.Now() err := os.MkdirAll(config.OutputDir, 0755) if err != nil { - os.Exit(-1) + return -1 } scheduledTests := 0 @@ -1895,7 +1949,7 @@ func main() { resultsAbsoluteDir, err := filepath.Abs(config.ResultsDir) if err != nil { - os.Exit(-1) + return -1 } fmt.Printf("Result directory: %s\n", resultsAbsoluteDir) @@ -2021,7 +2075,7 @@ func main() { if err != nil { _, err := fmt.Fprintf(os.Stderr, "Error reading directory %s: %v\n", config.JSONDir, err) if err != nil { - return + return -1 } continue } @@ -2116,12 +2170,12 @@ func main() { } select { case <-ctx.Done(): - return + return -1 case resultsChan <- testDesc.ResultChan: } select { case <-ctx.Done(): - return + return -1 case testsChan <- testDesc: } scheduledTests++ @@ -2175,7 +2229,7 @@ func main() { // Clean temp dir err = os.RemoveAll(TempDirname) if err != nil { - os.Exit(-1) + return -1 } // Print results @@ -2192,7 +2246,12 @@ func main() { fmt.Printf("Number of failed tests: %d\n", failedTests) if failedTests > 0 { - os.Exit(1) + return 1 } - os.Exit(0) + return 0 +} + +func main() { + exitCode := runMain() + os.Exit(exitCode) } From deb700818b836b9fc3eeb738d836a13b7a8b088c Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Wed, 17 Dec 2025 09:16:54 +0100 Subject: [PATCH 15/87] integration: cleaner errors and http round-trip log --- cmd/integration/main.go | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) 
diff --git a/cmd/integration/main.go b/cmd/integration/main.go index c7d30aa4..fd99d7a2 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1168,7 +1168,7 @@ func validateJsonRpcObject(response map[string]any, strict bool) error { // validateJsonRpcResponse checks that the received response is a valid JSON-RPC message, according to 2.0 spec. // This implies that the response must be either a valid JSON-RPC object, i.e. a JSON object containing at least -// "jsonrpc" and "id" fields or a JSON array where each element (if any) is in turn a valid JSON-RPC object. +// "jsonrpc" and "id" fields or a JSON array where each element (if any) is in turn a valid JSON-RPC object. func validateJsonRpcResponse(response any) error { _, isArray := response.([]any) responseAsMap, isMap := response.(map[string]any) @@ -1234,7 +1234,12 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t req.Header.Set(k, v) } + start := time.Now() resp, err := client.Do(req) + elapsed := time.Since(start) + if verboseLevel > 1 { + fmt.Printf("http round-trip time: %v\n", elapsed) + } if err != nil { if verboseLevel > 0 { fmt.Printf("\nhttp connection fail: %s %v\n", targetURL, err) @@ -1740,14 +1745,14 @@ func runTest(ctx context.Context, jsonFile string, testNumber int, transportType jsonrpcCommands, err = extractJsonCommands(tempFilePath) if err != nil { removeTempFiles() - return false, errors.New("cannot extract JSONRPC commands from " + tempFilePath) + return false, err } } removeTempFiles() } else { jsonrpcCommands, err = extractJsonCommands(jsonFilename) if err != nil { - return false, errors.New("cannot extract JSONRPC commands from " + jsonFilename) + return false, err } } @@ -1869,7 +1874,12 @@ func runMain() int { if err != nil { _, _ = fmt.Fprintf(os.Stderr, "could not create CPU profile: %v\n", err) } - defer f.Close() + defer func(f *os.File) { + err := f.Close() + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "could not close CPU 
profile: %v\n", err) + } + }(f) if err := pprof.StartCPUProfile(f); err != nil { _, _ = fmt.Fprintf(os.Stderr, "could not start CPU profile: %v\n", err) } @@ -1882,7 +1892,12 @@ func runMain() int { if err != nil { _, _ = fmt.Fprintf(os.Stderr, "could not create trace file: %v\n", err) } - defer f.Close() + defer func(f *os.File) { + err := f.Close() + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "could not close trace file: %v\n", err) + } + }(f) if err := trace.Start(f); err != nil { _, _ = fmt.Fprintf(os.Stderr, "could not start trace: %v\n", err) } @@ -1896,7 +1911,12 @@ func runMain() int { if err != nil { _, _ = fmt.Fprintf(os.Stderr, "could not create memory profile: %v\n", err) } - defer f.Close() + defer func(f *os.File) { + err := f.Close() + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "could not close memory profile: %v\n", err) + } + }(f) runtime.GC() // get up-to-date statistics if err := pprof.WriteHeapProfile(f); err != nil { _, _ = fmt.Fprintf(os.Stderr, "could not write memory profile: %v\n", err) From 36ea66c90701ad45c3417d4db3f75278b518d2d9 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Wed, 17 Dec 2025 18:41:33 +0100 Subject: [PATCH 16/87] integration: support metadata in json files --- cmd/integration/main.go | 329 ++++++----- .../test_03.json | 464 ++++++++-------- .../test_04.json | 246 +++++---- .../test_05.tar | Bin 14139 -> 36864 bytes .../test_06.tar | Bin 13573 -> 35328 bytes .../test_07.tar | Bin 9923 -> 26112 bytes .../test_10.json | 396 +++++++------- .../test_11.json | 52 +- .../test_13.tar | Bin 7061 -> 18944 bytes .../test_15.tar | Bin 21402 -> 55296 bytes .../test_16.tar | Bin 12333 -> 32256 bytes .../test_17.tar | Bin 12545 -> 32768 bytes .../test_18.json | 516 +++++++++--------- .../mainnet/eth_createAccessList/test_08.json | 90 +-- .../mainnet/eth_createAccessList/test_09.json | 100 ++-- .../mainnet/eth_createAccessList/test_17.json | 122 +++-- 16 files changed, 1211 
insertions(+), 1104 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index fd99d7a2..7dc728a4 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -401,10 +401,10 @@ func extractArchive(archivePath string, sanitizeExtension bool) ([]string, error } archivePath = archivePath + compressionType } - inputFile, err = os.Open(archivePath) - if err != nil { - return nil, err - } + } + inputFile, err = os.Open(archivePath) + if err != nil { + return nil, err } } @@ -534,9 +534,26 @@ type TestDescriptor struct { ResultChan chan TestResult } +type JsonRpcResponseMetadata struct { + PathOptions json.RawMessage `json:"pathOptions"` +} + +type JsonRpcTestMetadata struct { + Request interface{} `json:"request"` + Response *JsonRpcResponseMetadata `json:"response"` +} + +type JsonRpcTest struct { + Identifier string `json:"id"` + Reference string `json:"reference"` + Description string `json:"description"` + Metadata *JsonRpcTestMetadata `json:"metadata"` +} + type JSONRPCCommand struct { - Request interface{} `json:"request"` - Response interface{} `json:"response"` + Request interface{} `json:"request"` + Response interface{} `json:"response"` + TestInfo *JsonRpcTest `json:"test"` } func NewConfig() *Config { @@ -1351,39 +1368,6 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t } } -func compareJSONFiles(errorFileName, fileName1, fileName2, diffFileName string) (bool, error) { - switch jsonDiffKind { - case JdLibrary: - jsonNode1, err := jd.ReadJsonFile(fileName1) - if err != nil { - return false, err - } - jsonNode2, err := jd.ReadJsonFile(fileName2) - if err != nil { - return false, err - } - diff := jsonNode1.Diff(jsonNode2, jd.SET) - diffString := diff.Render() - err = os.WriteFile(diffFileName, []byte(diffString), 0644) - if err != nil { - return false, err - } - return true, nil - case JsonDiffTool: - if success := runCompare(true, errorFileName, fileName1, fileName2, diffFileName); !success { - 
return false, fmt.Errorf("failed to compare %s and %s using json-diff command", fileName1, fileName2) - } - return true, nil - case DiffTool: - if success := runCompare(false, errorFileName, fileName1, fileName2, diffFileName); !success { - return false, fmt.Errorf("failed to compare %s and %s using diff command", fileName1, fileName2) - } - return true, nil - default: - return false, fmt.Errorf("unknown JSON diff kind: %d", jsonDiffKind) - } -} - func runCompare(useJSONDiff bool, errorFile, tempFile1, tempFile2, diffFile string) bool { var cmd *exec.Cmd alreadyFailed := false @@ -1512,7 +1496,70 @@ var ( errDiffMismatch = errors.New("diff mismatch") ) -func compareJSON(config *Config, response interface{}, jsonFile, daemonFile, expRspFile, diffFile string, testNumber int) (bool, error) { +func isArchive(jsonFilename string) bool { + // Treat all files except .json as potential archive files + return !strings.HasSuffix(jsonFilename, ".json") +} + +func extractJsonCommands(jsonFilename string) ([]JSONRPCCommand, error) { + var jsonrpcCommands []JSONRPCCommand + data, err := os.ReadFile(jsonFilename) + if err != nil { + return jsonrpcCommands, errors.New("cannot read file " + jsonFilename) + } + if err := json.Unmarshal(data, &jsonrpcCommands); err != nil { + return jsonrpcCommands, errors.New("cannot parse JSON " + jsonFilename) + } + return jsonrpcCommands, nil +} + +func (c *JSONRPCCommand) compareJSONFiles(errorFileName, fileName1, fileName2, diffFileName string) (bool, error) { + switch jsonDiffKind { + case JdLibrary: + jsonNode1, err := jd.ReadJsonFile(fileName1) + if err != nil { + return false, err + } + jsonNode2, err := jd.ReadJsonFile(fileName2) + if err != nil { + return false, err + } + var diff jd.Diff + // Check if the test contains any response metadata with custom options for JSON diff + if c.TestInfo != nil && c.TestInfo.Metadata != nil && c.TestInfo.Metadata.Response != nil { + if c.TestInfo.Metadata.Response.PathOptions != nil { + pathOptions := 
c.TestInfo.Metadata.Response.PathOptions + options, err := jd.ReadOptionsString(string(pathOptions)) + if err != nil { + return false, err + } + diff = jsonNode1.Diff(jsonNode2, options...) + } + } else { + diff = jsonNode1.Diff(jsonNode2) + } + diffString := diff.Render() + err = os.WriteFile(diffFileName, []byte(diffString), 0644) + if err != nil { + return false, err + } + return true, nil + case JsonDiffTool: + if success := runCompare(true, errorFileName, fileName1, fileName2, diffFileName); !success { + return false, fmt.Errorf("failed to compare %s and %s using json-diff command", fileName1, fileName2) + } + return true, nil + case DiffTool: + if success := runCompare(false, errorFileName, fileName1, fileName2, diffFileName); !success { + return false, fmt.Errorf("failed to compare %s and %s using diff command", fileName1, fileName2) + } + return true, nil + default: + return false, fmt.Errorf("unknown JSON diff kind: %d", jsonDiffKind) + } +} + +func (c *JSONRPCCommand) compareJSON(config *Config, response interface{}, jsonFile, daemonFile, expRspFile, diffFile string, testNumber int) (bool, error) { baseName := filepath.Join(TempDirname, fmt.Sprintf("test_%d", testNumber)) err := os.MkdirAll(baseName, 0755) if err != nil { @@ -1567,7 +1614,7 @@ func compareJSON(config *Config, response interface{}, jsonFile, daemonFile, exp } } - diffResult, err := compareJSONFiles(errorFile, tempFile1, tempFile2, diffFile) + diffResult, err := c.compareJSONFiles(errorFile, tempFile1, tempFile2, diffFile) diffFileSize := int64(0) if diffResult { @@ -1604,8 +1651,9 @@ func compareJSON(config *Config, response interface{}, jsonFile, daemonFile, exp return true, nil } -func processResponse(response, result1 interface{}, responseInFile interface{}, - config *Config, outputDir, daemonFile, expRspFile, diffFile, jsonFile string, testNumber int) (bool, error) { +func (c *JSONRPCCommand) processResponse(response, result1 any, responseInFile interface{}, config *Config, outputDir, 
daemonFile, expRspFile, diffFile string, descriptor *TestDescriptor) (bool, error) { + jsonFile := descriptor.Name + testNumber := descriptor.Number var expectedResponse interface{} if result1 != nil { @@ -1680,7 +1728,7 @@ func processResponse(response, result1 interface{}, responseInFile interface{}, return false, err } - same, err := compareJSON(config, response, jsonFile, daemonFile, expRspFile, diffFile, testNumber) + same, err := c.compareJSON(config, response, jsonFile, daemonFile, expRspFile, diffFile, testNumber) if err != nil { return same, err } @@ -1706,25 +1754,98 @@ func processResponse(response, result1 interface{}, responseInFile interface{}, return same, nil } -func isArchive(jsonFilename string) bool { - // Treat all files except .json as potential archive files - return !strings.HasSuffix(jsonFilename, ".json") -} +func (c *JSONRPCCommand) run(ctx context.Context, config *Config, descriptor *TestDescriptor) (bool, error) { + transportType := descriptor.TransportType + jsonFile := descriptor.Name + request := c.Request -func extractJsonCommands(jsonFilename string) ([]JSONRPCCommand, error) { - var jsonrpcCommands []JSONRPCCommand - data, err := os.ReadFile(jsonFilename) - if err != nil { - return jsonrpcCommands, errors.New("cannot read file " + jsonFilename) + method := "" + requestBytes, _ := json.Marshal(request) + var requestMap map[string]interface{} + if err := json.Unmarshal(requestBytes, &requestMap); err == nil { + if m, ok := requestMap["method"].(string); ok { + method = m + } + } else { + // Try an array of requests + var requestArray []map[string]interface{} + if err := json.Unmarshal(requestBytes, &requestArray); err == nil && len(requestArray) > 0 { + if m, ok := requestArray[0]["method"].(string); ok { + method = m + } + } } - if err := json.Unmarshal(data, &jsonrpcCommands); err != nil { - return jsonrpcCommands, errors.New("cannot parse JSON " + jsonFilename) + + requestDumps, _ := json.Marshal(request) + target := 
getTarget(config.DaemonUnderTest, method, config) + target1 := "" + + var jwtAuth string + if config.JWTSecret != "" { + secretBytes, _ := hex.DecodeString(config.JWTSecret) + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "iat": time.Now().Unix(), + }) + tokenString, _ := token.SignedString(secretBytes) + jwtAuth = "Bearer " + tokenString + } + + outputAPIFilename := filepath.Join(config.OutputDir, strings.TrimSuffix(jsonFile, filepath.Ext(jsonFile))) + outputDirName := filepath.Dir(outputAPIFilename) + diffFile := outputAPIFilename + "-diff.json" + + if !config.VerifyWithDaemon { + result, err := executeRequest(ctx, transportType, jwtAuth, string(requestDumps), target, config.VerboseLevel) + if err != nil { + return false, err + } + if config.VerboseLevel > 2 { + fmt.Printf("%s: [%v]\n", config.DaemonUnderTest, result) + } + if result == nil { + return false, errors.New("response is n il (maybe node at " + target + " is down?)") + } + + responseInFile := c.Response + daemonFile := outputAPIFilename + "-response.json" + expRspFile := outputAPIFilename + "-expResponse.json" + + return c.processResponse(result, nil, responseInFile, config, + outputDirName, daemonFile, expRspFile, diffFile, descriptor) + } else { + target = getTarget(DaemonOnDefaultPort, method, config) + result, err := executeRequest(ctx, transportType, jwtAuth, string(requestDumps), target, config.VerboseLevel) + if err != nil { + return false, err + } + if config.VerboseLevel > 2 { + fmt.Printf("%s: [%v]\n", config.DaemonUnderTest, result) + } + if result == nil { + return false, errors.New("response is nil (maybe node at " + target + " is down?)") + } + target1 = getTarget(config.DaemonAsReference, method, config) + result1, err := executeRequest(ctx, transportType, jwtAuth, string(requestDumps), target1, config.VerboseLevel) + if err != nil { + return false, err + } + if config.VerboseLevel > 2 { + fmt.Printf("%s: [%v]\n", config.DaemonAsReference, result1) + } + if result1 
== nil { + return false, errors.New("response is nil (maybe node at " + target1 + " is down?)") + } + + daemonFile := outputAPIFilename + getJSONFilenameExt(DaemonOnDefaultPort, target) + expRspFile := outputAPIFilename + getJSONFilenameExt(config.DaemonAsReference, target1) + + return c.processResponse(result, result1, nil, config, + outputDirName, daemonFile, expRspFile, diffFile, descriptor) } - return jsonrpcCommands, nil } -func runTest(ctx context.Context, jsonFile string, testNumber int, transportType string, config *Config) (bool, error) { - jsonFilename := filepath.Join(config.JSONDir, jsonFile) +func runTest(ctx context.Context, descriptor *TestDescriptor, config *Config) (bool, error) { + jsonFilename := filepath.Join(config.JSONDir, descriptor.Name) var jsonrpcCommands []JSONRPCCommand var err error @@ -1756,94 +1877,12 @@ func runTest(ctx context.Context, jsonFile string, testNumber int, transportType } } - for _, jsonRPC := range jsonrpcCommands { - request := jsonRPC.Request - method := "" - - requestBytes, _ := json.Marshal(request) - var requestMap map[string]interface{} - if err := json.Unmarshal(requestBytes, &requestMap); err == nil { - if m, ok := requestMap["method"].(string); ok { - method = m - } - } else { - // Try an array of requests - var requestArray []map[string]interface{} - if err := json.Unmarshal(requestBytes, &requestArray); err == nil && len(requestArray) > 0 { - if m, ok := requestArray[0]["method"].(string); ok { - method = m - } - } - } - - requestDumps, _ := json.Marshal(request) - target := getTarget(config.DaemonUnderTest, method, config) - target1 := "" - - var jwtAuth string - if config.JWTSecret != "" { - secretBytes, _ := hex.DecodeString(config.JWTSecret) - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "iat": time.Now().Unix(), - }) - tokenString, _ := token.SignedString(secretBytes) - jwtAuth = "Bearer " + tokenString - } - - outputAPIFilename := filepath.Join(config.OutputDir, 
strings.TrimSuffix(jsonFile, filepath.Ext(jsonFile))) - outputDirName := filepath.Dir(outputAPIFilename) - diffFile := outputAPIFilename + "-diff.json" - - if !config.VerifyWithDaemon { - result, err := executeRequest(ctx, transportType, jwtAuth, string(requestDumps), target, config.VerboseLevel) - if err != nil { - return false, err - } - if config.VerboseLevel > 2 { - fmt.Printf("%s: [%v]\n", config.DaemonUnderTest, result) - } - if result == nil { - return false, errors.New("response is nil (maybe node at " + target + " is down?)") - } - - responseInFile := jsonRPC.Response - daemonFile := outputAPIFilename + "-response.json" - expRspFile := outputAPIFilename + "-expResponse.json" - - return processResponse(result, nil, responseInFile, config, - outputDirName, daemonFile, expRspFile, diffFile, jsonFile, testNumber) - } else { - target = getTarget(DaemonOnDefaultPort, method, config) - result, err := executeRequest(ctx, transportType, jwtAuth, string(requestDumps), target, config.VerboseLevel) - if err != nil { - return false, err - } - if config.VerboseLevel > 2 { - fmt.Printf("%s: [%v]\n", config.DaemonUnderTest, result) - } - if result == nil { - return false, errors.New("response is nil (maybe node at " + target + " is down?)") - } - target1 = getTarget(config.DaemonAsReference, method, config) - result1, err := executeRequest(ctx, transportType, jwtAuth, string(requestDumps), target1, config.VerboseLevel) - if err != nil { - return false, err - } - if config.VerboseLevel > 2 { - fmt.Printf("%s: [%v]\n", config.DaemonAsReference, result1) - } - if result1 == nil { - return false, errors.New("response is nil (maybe node at " + target1 + " is down?)") - } - - daemonFile := outputAPIFilename + getJSONFilenameExt(DaemonOnDefaultPort, target) - expRspFile := outputAPIFilename + getJSONFilenameExt(config.DaemonAsReference, target1) - - return processResponse(result, result1, nil, config, - outputDirName, daemonFile, expRspFile, diffFile, jsonFile, testNumber) - } + 
for _, jsonrpcCmd := range jsonrpcCommands { + return jsonrpcCmd.run(ctx, config, descriptor) // TODO: support multiple tests } + fmt.Printf("WARN: no commands found in test %s\n", jsonFilename) + return true, nil } @@ -2000,7 +2039,7 @@ func runMain() int { if test == nil { return } - success, err := runTest(ctx, test.Name, test.Number, test.TransportType, config) + success, err := runTest(ctx, test, config) test.ResultChan <- TestResult{Success: success, Error: err, Test: test} case <-ctx.Done(): return diff --git a/integration/mainnet/debug_getModifiedAccountsByNumber/test_03.json b/integration/mainnet/debug_getModifiedAccountsByNumber/test_03.json index 09fbcc2d..993995e8 100644 --- a/integration/mainnet/debug_getModifiedAccountsByNumber/test_03.json +++ b/integration/mainnet/debug_getModifiedAccountsByNumber/test_03.json @@ -1,232 +1,242 @@ [ - { - "request": { - "id": 1, - "jsonrpc": "2.0", - "method": "debug_getModifiedAccountsByNumber", - "params": [ - 6002534, - 6002536 - ] - }, + { + "test": { + "id": "debug_getModifiedAccountsByNumber_6002534_6002536", + "reference": "", + "description": "modified accounts between block 6002534 and 6002536", + "metadata": { "response": { - "id": 1, - "jsonrpc": "2.0", - "result": [ - "0x564a97676045620b66c3bff9e025df714492aa7e", - "0x829bd824b016326a401d083b33d092293333a830", - "0xb19710cd90d74baf9338a789e15d655166a1a06f", - "0xe3c10c130c7ac43c21abbf5efe9777a276bd37be", - "0x77e17f1e534cf753a2a8f707e04964b29c52412d", - "0xf2190f5fe98dcbfe94cae8f2a315dc76ac5ab2e6", - "0xf4b5b3b7be39bde26146fcbf359e195310cc1de3", - "0x45c8c3ce6dd090f14001d83005bc75cacb6702c7", - "0x37d4b604b210c05f766071b15096bb0aa5510586", - "0x5418421de11897c9dfee01dd2792f960389a1a7b", - "0x87df173491f9de9c570cf039dd06d6bf9ec07ffe", - "0x8cce797b0d7e491f3042e6e1144e87df8749d5c6", - "0xb3a1e376fd1dbbcb13a086e074547701ea6a536a", - "0xf6083b449eb3c128c25fcd32c7e0b1da1708b844", - "0xfcdfa08971b20e8849a0ccc274732510a550445b", - 
"0x0d5eae90c6299da092cb0a0a12bcd89defb02d98", - "0x21c35f9fbb69ade2205cf9d01f6865fc941f97e8", - "0xa7a7899d944fe658c4b0a1803bab2f490bd3849e", - "0xe06be5fe129334f870792a7b4397495270743c54", - "0xea674fdde714fd979de3edf0f56aa9716b898ec8", - "0x14fbca95be7e99c15cc2996c6c9d841e54b79425", - "0x30146933a3a0babc74ec0b3403bec69281ba5914", - "0x32656891a0049f8c51cf331f0c654e4c27c9b95a", - "0x73f81cc6fa7c6823f8bf2b5c1e0b3ce7336407f1", - "0x9f2c0f640d2291d0fbd0f2a630372863cce49993", - "0xd17fbe0beb18482792d7457fc9c10592eecfc0e4", - "0xe4c89b9fcab29c5bee3971b698cca4528f2644e2", - "0x0069f3a8e64db8c7fdb05835755153850a36dcbd", - "0x08d32b0da63e2c3bcf8019c9c5d849d7a9d791e6", - "0x38d13366cd1a19ab002ea6021a4aa02b7afa6722", - "0x954a70eb5b5fb1a031a762bb6c6e5546c784ca86", - "0xb3775fb83f7d12a36e0475abdd1fca35c091efbe", - "0xd850942ef8811f2a866692a623011bde52a462c1", - "0x0807bef27f8d978e19ef1d9723c36caa312fff14", - "0x2772b5fcd39ff2aadcc1f5fa21cec75479ead883", - "0x2a0c0dbecc7e4d658f48e01e3fa353f44050c208", - "0xe841d388ed30c0988d5a2db071062c202f9c3a86", - "0xf65775cd5ca5c627dba83ce92787b3fd00fd2085", - "0xfb5fea8f0a93ce09334bfc2ce0df5f7b6c559eb1", - "0x98e74997fdec3166792d3617079176405e1387d0", - "0x9a4d6f927eef4b5f5a5da9765dc5076f312182ea", - "0x0f007aa14325f3d477e19fbe8a022ce2418c12d5", - "0x86cae9ae04a08481770cc4094805e77ff3cfe6e3", - "0xf9ba0955b0509ac6138908ccc50d5bd296e48d7d", - "0x0f430c4164338588efd2675a396a880f2bd4e8b0", - "0xae1d8e8915a26fce99964b2083cc8351036057f3", - "0xf4605ceea54add5965f0ebb7313f321648901068", - "0x8e2add6138f2d6da35dedcc1bd868a19f1064e41", - "0x73957709695e73fd175582105c44743cf0fb6f2f", - "0x423b5f62b328d0d6d44870f4eee316befa0b2df5", - "0xdc69ad15c432d55e97e174b483832af78b7ff862", - "0x39d1cd18c628f2edac0085b0cf35e08c67c31b61", - "0x61469f3cc14ebb4ecf03f15a7cab098a485d16cf", - "0x7051620d11042c4335069aaa4f10cd3b4290c681", - "0xa69484689b8baef0255399f32d389e0a15ac4d6d", - "0xb01cb49fe0d6d6e47edf3a072d15dfe73155331c", - 
"0x209c4784ab1e8183cf58ca33cb740efbf3fc18ef", - "0x5419832a9190ec26c5b5196157d4625f5c82c2e5", - "0x8b5e0d2bcf17bf2a151e9d05d869742fb8fbd312", - "0xb3750203d6b17e30f39e6b9ea9a22c7dc30709d5", - "0x28d70abd2efdfdd6608de2b343a414e3f4120c5c", - "0x2c479737111507f93914a234051d0b7e15ef5dc1", - "0xe99ddae9181957e91b457e4c79a1b577e55a5742", - "0xd3339a8dfe03fba58ce11bc90d6c35c7b970f5c9", - "0x825d5d0df3b2d59f69cc673f041ca91a296b8183", - "0x94130de66f0fb367eaba6e99aeccedcc83e983ad", - "0xa1341b2f9dc6d05a48d6bc95271cd0b82df7a51b", - "0xdfcd0d7c0742df7dfd7bf13860f6cd37963c7c59", - "0x6aba1623ea906d1164cbb007e764ebde2514a2ba", - "0x70d837e703dd75ce40a5891e3ebdb0648f7caefb", - "0x96e12b9ca892c08296678dc20b498c715f15d1f0", - "0xb9ae7e4dec1b32526c914ef11fbd6c34fbdeaa56", - "0x0000000000000000000000000000000000000001", - "0x0346537b1f999ded3256203dc404db6474aa9017", - "0x4401508a01bd8b0cc1b7e1c5316b8a16b74aefc8", - "0x2ff01d129f37ce59fbaf3aa23860744efb034a82", - "0x56628d1a4d282bc6b7d72b52973c809354ea9342", - "0xdae1ced3921c340165bba6f8260c3a1abc381164", - "0xf4cd88db4b31f8b972897f946282b91c0c750124", - "0xf296c068af7fb616c5d087f94d3e2bdc44790067", - "0x79ebdbcb788463afc8eb5a0e9707f6ed4acf97a7", - "0x8078660eb8f4fca66a37d0fb9a8aa2ea0c55b1c4", - "0x3c0a03ec54742b73e883c596303711ff1ee08c51", - "0x5b135d7e2774c801a73208f258123d7623e07784", - "0xbd4abef9a9ec6147747275e53a108607fa2b20e3", - "0x002da5bdf3e0a4d1d173af06b9946813dd46a6de", - "0x51cd3cf94a321955868c46473f2c36b5cebf7411", - "0x41ab75435668919bb507f871dd01e9762c2d173a", - "0x5994e187900eac75194976fe3a66037f4fb4c8c9", - "0xd1ceeeefa68a6af0a5f6046132d986066c7f9426", - "0xf8c595d070d104377f58715ce2e6c93e49a87f3c", - "0x2ee3b41f8413649ee1be42a1f2b10f2025b8e4d0", - "0x641fbb7ddaa90f9a6cdaf9b3c1c344e0d3d7791d", - "0x74fd51a98a4a1ecbef8cc43be801cce630e260bd", - "0x7600977eb9effa627d6bd0da2e5be35e11566341", - "0xc7029ed9eba97a096e72607f4340c34049c7af48", - "0x5b429909577bf48b7ba4959ba02cee7aebb0f37d", - 
"0x6a27348483d59150ae76ef4c0f3622a78b0ca698", - "0x99006250906ef07520735c62956e0cfdd99b741d", - "0xa4ea687a2a7f29cf2dc66b39c68e4411c0d00c49", - "0xde9bb11b21687e3fd9e72fc7e0278e8549068993", - "0x7f3d397286d277b19250f9ef642f44069602365d", - "0x0b32d64995884cb19e5c6b287519e19167ef8899", - "0x1e24a184d8219b42c1662964574182539ae723c2", - "0xf0868e619e3394e020d9c0afe055071191a120fc", - "0x3c94feba1e5e2291f9ef0efb3779f14ef4a3c64f", - "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5", - "0x9d9748b0a21cf0ad2707474cc83c5f165712e05f", - "0xe2b3d2ecf49383556c9d18983a2dc874ee718ff1", - "0x0168b06c9b7511a67d54d81a0a890e0f30fe1583", - "0x06795683b9206e1850a7d84226d0e47fefb1b2a4", - "0x46553c21bc0a3e06cc7405c5ccd1125ec8452338", - "0xe7c50a01d5c7e84c63d7d274f8199aaac7ca0044", - "0x4b1f3bca46002fe04423e1512d5068bba0da5b32", - "0x9e69b14fe09eace4e39ee12edb786cb39ccc782b", - "0xfb7442ac247ae842238b3e060cd8a5798c1969e3", - "0x2462b8c0175b473234c13432ffab2f397bc5c3e2", - "0xac6eb79af129c9dc8024ca5222bb183ab8e01e01", - "0xce705918817413955da24a2a1bf3cbad9d6abd70", - "0x00000000c0293c8ca34dac9bcc0f953532d34e4d", - "0x32be343b94f860124dc4fee278fdcbd38c102d88", - "0xaee71ef73f9c04e98f5c2a1abb7087ea5abce78c", - "0xb772893ca296626546363dc02ad2d6a9adf4482e", - "0xdad30f229cf0b9e094695e1c95a87969609d9955", - "0x2494ee4aee899f0f813a36b163ef62881686b777", - "0xfb77a964c685fb9f11ab08be08622bf7d8b61c9e", - "0xe6423dca37a37b438edfd053bdf0ab1b62cc1dd4", - "0xd60d353610d9a5ca478769d371b53cefaa7b6e4c", - "0x8db0071675cdb20bca105009a0c7e6d316626123", - "0x655d7989dbf22869181a95aafb94f42aac431dd4", - "0x0ad3227eb47597b566ec138b3afd78cfea752de5", - "0x498ed67594a93cb2b5bfc487b030dafa90996906", - "0xe90a880501c9b7a6e43b07f0dde712df7eaad0ac", - "0xe839d7c4d76b70ef80ac0927bf4248c3bdf236d3", - "0x004075e4d4b1ce6c48c81cc940e2bad24b489e64", - "0x1edfa18469285c1de8a307f6c2a231287924caa2", - "0x4265cfa8a6b1d941531f6d0acf4774c5b7b7eb37", - "0xb84437f8e7bc8300418abc76ef7d858281f6d314", - 
"0xe8cca7f750d4f446e7b3f6e365247dc401d95e47", - "0x09a99acdc74f74da4832a4ea7db28cb872a19b9a", - "0x574577eee9a8402f43019d38f865c3931e48d0df", - "0x5f0bfe2ac3c0a34eca548d377315c2d3fe60c84f", - "0x4a4330fa9e4e343a5560e0aecef290b4a9f42f44", - "0x7da47ca0de0797ffedfea9194cac9a8a5d0cd0cc", - "0x8fe869535e0af7366d845a641e3feda460c2861f", - "0xfbb1b73c4f0bda4f67dca266ce6ef42f520fbb98", - "0x89d24a6b4ccb1b6faa2625fe562bdd9a23260359", - "0x99163ad81a1613634e1cafc30bb1bc83e6e35716", - "0xa34a8fb76c137eb016e8adaecf892f74629faa62", - "0x060061095f16b1110039f14fc78cde263ca018a8", - "0x464ebe77c293e473b48cfe96ddcf88fcf7bfdac0", - "0x669fa9e30916e0c244849d4266560fdd6feeeee2", - "0x880aa7cbf18e42fa185de3a82ad6308a86e2acc1", - "0xa372c79e415e00f63348c322227364f8f07347ea", - "0xdcd6968c5e40a6b26cabca51e818b0404082c156", - "0x0d0707963952f2fba59dd06f2b425ace40b492fe", - "0x1ce7ae555139c5ef5a57cc8d814a867ee6ee33d8", - "0x251ac92106d0181dbf4c80c8441bf0d0c4ce0f07", - "0x419e84b3fe15b4e414db4662ce8df93a87bfccce", - "0xf64edd94558ca8b3a0e3b362e20bb13ff52ea513", - "0xb64ef51c888972c908cfacf59b47c1afbc0ab8ac", - "0xf3dc28037d87942433a6f8b1e5c34070604c78df", - "0xb4eb12ae75c9aa2f88faca82e49f285adc8b6d8d", - "0x8fb842ccc4563ee276c9970effcb67e7bc1ba5fe", - "0xf6d865332fa044e23aec4ab815edab957bfbf8e4", - "0x05f51aab068caa6ab7eeb672f88c180f67f17ec7", - "0xc179fbddc946694d11185d4e15dbba5fd0adac0a", - "0xbc832776a6e9342f7ae92036e2dc76a00b9630ec", - "0x23f229174fd83b7a219024ff9d920c4c2cdccd13", - "0x500e05cc004b5197387e8ca37526c3dea79d1978", - "0x9dcdbcfbd2996e607927c189c7c98eb19aa378cd", - "0x6b9e9e10abb661b56b0602817c3f4bcd7f4d32c2", - "0x88a29ce92821e9aa9d5d5b01b5a011a4ab004b84", - "0x8d4a36eeadb278ac6cda5b87cd38577fa00db043", - "0xb8eb7073716bcc6beed4fce7cda2e64da8ef8bd3", - "0x607a5c47978e2eb6d59c6c6f51bc0bf411f4b85a", - "0x77c23e39cdacba3f1e81314e164358fc8ad50ea6", - "0xbd168cbf9d3a375b38dc51a202b5e8a4e52069ed", - "0x739f745731c58ced32e0cd528c8a48332e612c2f", - 
"0x96bc2bc24e3886550e02b0199c07ef9dcb92f36f", - "0xbc73017522d1603ce5a460f26f45db94d7740247", - "0x14c03c8a88c22a57e281f7890919982a2ae1bb1a", - "0x5ce46f6fab9da1fb93edf4fa34d98e2d49a62e2d", - "0x68d7dacb9d43f1f12c33c58ad7aea54011accde9", - "0x1e466748604517c88dfd6c0fb2c4977fed7cf6cf", - "0x3ae568669be648088f6f705bd8ea5d001154584b", - "0x3f98f0697eefef581220500ad6b3bf11296056ee", - "0x90cbab41b057b4e0c2b53f8ebce73d7d9503dcfd", - "0xdd4950f977ee28d2c132f1353d1595035db444ee", - "0xa6a7d616dbbb6bf5343b37577aa5c319ef33e311", - "0x01fb668f734e272ac4ed4bdd2ae6ff0e0210f9d8", - "0x1bae8963cfc7df0d18c783175bbde7e51de03e5b", - "0x601b50be525533e7a5a8958f8176aee5798e3106", - "0xa82afb67d3882646b36ef8a3fdc22e974f363304", - "0xdadaff149b7391ff2f0b3f04480bf24d6c611b6d", - "0xde503e256ae6fa00b32988ce4b61d73f4013a456", - "0x005f5fe7c3cd6cd0b24c1eb88dec13d72b044075", - "0x5c02bfa0b2dd815de4365823edcc1272e95ab254", - "0x75e7f640bf6968b6f32c47a3cd82c3c2c9dcae68", - "0xa62142888aba8370742be823c1782d17a0389da1", - "0x30988be9d80dc6eb12c6ef8385107f85c23596e1", - "0xc14a055ba72c25d80ad6402888dfd97df1eaef98", - "0xfc26bb673d1bef79ac9ffa27bd50152dccb79c64", - "0x004bd3562a42c8a7394794849b8ff5ad71c527b2", - "0x75fcb0bb6d94e09343a85e613e491659ce619627", - "0x9137b71e493598d1c22f587fc1832b35610cf997", - "0xf14d3eac5e8080f0e3d0f03773686925db5906cf", - "0xfd2d91100c786fcb1f3e8103ca1880a869828b1f", - "0xa3456f00efa4dd4bda8a111560b5f6df2aad062f", - "0xd47d68944ac7c72e0c7ae633610fa43795ece37e", - "0x08a2246dcb48db6a5a9e1f6bc082752fceddd106", - "0x0bc17bf4ba1f7e981b8bc1a95e44a203f68d22df", - "0x4c7b8591c50f4ad308d07d6294f2945e074420f5" - ] + "pathOptions": [{"@": ["result"], "^": ["SET"]}] } + } + }, + "request": { + "id": 1, + "jsonrpc": "2.0", + "method": "debug_getModifiedAccountsByNumber", + "params": [ + 6002534, + 6002536 + ] + }, + "response": { + "id": 1, + "jsonrpc": "2.0", + "result": [ + "0x564a97676045620b66c3bff9e025df714492aa7e", + "0x829bd824b016326a401d083b33d092293333a830", + 
"0xb19710cd90d74baf9338a789e15d655166a1a06f", + "0xe3c10c130c7ac43c21abbf5efe9777a276bd37be", + "0x77e17f1e534cf753a2a8f707e04964b29c52412d", + "0xf2190f5fe98dcbfe94cae8f2a315dc76ac5ab2e6", + "0xf4b5b3b7be39bde26146fcbf359e195310cc1de3", + "0x45c8c3ce6dd090f14001d83005bc75cacb6702c7", + "0x37d4b604b210c05f766071b15096bb0aa5510586", + "0x5418421de11897c9dfee01dd2792f960389a1a7b", + "0x87df173491f9de9c570cf039dd06d6bf9ec07ffe", + "0x8cce797b0d7e491f3042e6e1144e87df8749d5c6", + "0xb3a1e376fd1dbbcb13a086e074547701ea6a536a", + "0xf6083b449eb3c128c25fcd32c7e0b1da1708b844", + "0xfcdfa08971b20e8849a0ccc274732510a550445b", + "0x0d5eae90c6299da092cb0a0a12bcd89defb02d98", + "0x21c35f9fbb69ade2205cf9d01f6865fc941f97e8", + "0xa7a7899d944fe658c4b0a1803bab2f490bd3849e", + "0xe06be5fe129334f870792a7b4397495270743c54", + "0xea674fdde714fd979de3edf0f56aa9716b898ec8", + "0x14fbca95be7e99c15cc2996c6c9d841e54b79425", + "0x30146933a3a0babc74ec0b3403bec69281ba5914", + "0x32656891a0049f8c51cf331f0c654e4c27c9b95a", + "0x73f81cc6fa7c6823f8bf2b5c1e0b3ce7336407f1", + "0x9f2c0f640d2291d0fbd0f2a630372863cce49993", + "0xd17fbe0beb18482792d7457fc9c10592eecfc0e4", + "0xe4c89b9fcab29c5bee3971b698cca4528f2644e2", + "0x0069f3a8e64db8c7fdb05835755153850a36dcbd", + "0x08d32b0da63e2c3bcf8019c9c5d849d7a9d791e6", + "0x38d13366cd1a19ab002ea6021a4aa02b7afa6722", + "0x954a70eb5b5fb1a031a762bb6c6e5546c784ca86", + "0xb3775fb83f7d12a36e0475abdd1fca35c091efbe", + "0xd850942ef8811f2a866692a623011bde52a462c1", + "0x0807bef27f8d978e19ef1d9723c36caa312fff14", + "0x2772b5fcd39ff2aadcc1f5fa21cec75479ead883", + "0x2a0c0dbecc7e4d658f48e01e3fa353f44050c208", + "0xe841d388ed30c0988d5a2db071062c202f9c3a86", + "0xf65775cd5ca5c627dba83ce92787b3fd00fd2085", + "0xfb5fea8f0a93ce09334bfc2ce0df5f7b6c559eb1", + "0x98e74997fdec3166792d3617079176405e1387d0", + "0x9a4d6f927eef4b5f5a5da9765dc5076f312182ea", + "0x0f007aa14325f3d477e19fbe8a022ce2418c12d5", + "0x86cae9ae04a08481770cc4094805e77ff3cfe6e3", + 
"0xf9ba0955b0509ac6138908ccc50d5bd296e48d7d", + "0x0f430c4164338588efd2675a396a880f2bd4e8b0", + "0xae1d8e8915a26fce99964b2083cc8351036057f3", + "0xf4605ceea54add5965f0ebb7313f321648901068", + "0x8e2add6138f2d6da35dedcc1bd868a19f1064e41", + "0x73957709695e73fd175582105c44743cf0fb6f2f", + "0x423b5f62b328d0d6d44870f4eee316befa0b2df5", + "0xdc69ad15c432d55e97e174b483832af78b7ff862", + "0x39d1cd18c628f2edac0085b0cf35e08c67c31b61", + "0x61469f3cc14ebb4ecf03f15a7cab098a485d16cf", + "0x7051620d11042c4335069aaa4f10cd3b4290c681", + "0xa69484689b8baef0255399f32d389e0a15ac4d6d", + "0xb01cb49fe0d6d6e47edf3a072d15dfe73155331c", + "0x209c4784ab1e8183cf58ca33cb740efbf3fc18ef", + "0x5419832a9190ec26c5b5196157d4625f5c82c2e5", + "0x8b5e0d2bcf17bf2a151e9d05d869742fb8fbd312", + "0xb3750203d6b17e30f39e6b9ea9a22c7dc30709d5", + "0x28d70abd2efdfdd6608de2b343a414e3f4120c5c", + "0x2c479737111507f93914a234051d0b7e15ef5dc1", + "0xe99ddae9181957e91b457e4c79a1b577e55a5742", + "0xd3339a8dfe03fba58ce11bc90d6c35c7b970f5c9", + "0x825d5d0df3b2d59f69cc673f041ca91a296b8183", + "0x94130de66f0fb367eaba6e99aeccedcc83e983ad", + "0xa1341b2f9dc6d05a48d6bc95271cd0b82df7a51b", + "0xdfcd0d7c0742df7dfd7bf13860f6cd37963c7c59", + "0x6aba1623ea906d1164cbb007e764ebde2514a2ba", + "0x70d837e703dd75ce40a5891e3ebdb0648f7caefb", + "0x96e12b9ca892c08296678dc20b498c715f15d1f0", + "0xb9ae7e4dec1b32526c914ef11fbd6c34fbdeaa56", + "0x0000000000000000000000000000000000000001", + "0x0346537b1f999ded3256203dc404db6474aa9017", + "0x4401508a01bd8b0cc1b7e1c5316b8a16b74aefc8", + "0x2ff01d129f37ce59fbaf3aa23860744efb034a82", + "0x56628d1a4d282bc6b7d72b52973c809354ea9342", + "0xdae1ced3921c340165bba6f8260c3a1abc381164", + "0xf4cd88db4b31f8b972897f946282b91c0c750124", + "0xf296c068af7fb616c5d087f94d3e2bdc44790067", + "0x79ebdbcb788463afc8eb5a0e9707f6ed4acf97a7", + "0x8078660eb8f4fca66a37d0fb9a8aa2ea0c55b1c4", + "0x3c0a03ec54742b73e883c596303711ff1ee08c51", + "0x5b135d7e2774c801a73208f258123d7623e07784", + 
"0xbd4abef9a9ec6147747275e53a108607fa2b20e3", + "0x002da5bdf3e0a4d1d173af06b9946813dd46a6de", + "0x51cd3cf94a321955868c46473f2c36b5cebf7411", + "0x41ab75435668919bb507f871dd01e9762c2d173a", + "0x5994e187900eac75194976fe3a66037f4fb4c8c9", + "0xd1ceeeefa68a6af0a5f6046132d986066c7f9426", + "0xf8c595d070d104377f58715ce2e6c93e49a87f3c", + "0x2ee3b41f8413649ee1be42a1f2b10f2025b8e4d0", + "0x641fbb7ddaa90f9a6cdaf9b3c1c344e0d3d7791d", + "0x74fd51a98a4a1ecbef8cc43be801cce630e260bd", + "0x7600977eb9effa627d6bd0da2e5be35e11566341", + "0xc7029ed9eba97a096e72607f4340c34049c7af48", + "0x5b429909577bf48b7ba4959ba02cee7aebb0f37d", + "0x6a27348483d59150ae76ef4c0f3622a78b0ca698", + "0x99006250906ef07520735c62956e0cfdd99b741d", + "0xa4ea687a2a7f29cf2dc66b39c68e4411c0d00c49", + "0xde9bb11b21687e3fd9e72fc7e0278e8549068993", + "0x7f3d397286d277b19250f9ef642f44069602365d", + "0x0b32d64995884cb19e5c6b287519e19167ef8899", + "0x1e24a184d8219b42c1662964574182539ae723c2", + "0xf0868e619e3394e020d9c0afe055071191a120fc", + "0x3c94feba1e5e2291f9ef0efb3779f14ef4a3c64f", + "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5", + "0x9d9748b0a21cf0ad2707474cc83c5f165712e05f", + "0xe2b3d2ecf49383556c9d18983a2dc874ee718ff1", + "0x0168b06c9b7511a67d54d81a0a890e0f30fe1583", + "0x06795683b9206e1850a7d84226d0e47fefb1b2a4", + "0x46553c21bc0a3e06cc7405c5ccd1125ec8452338", + "0xe7c50a01d5c7e84c63d7d274f8199aaac7ca0044", + "0x4b1f3bca46002fe04423e1512d5068bba0da5b32", + "0x9e69b14fe09eace4e39ee12edb786cb39ccc782b", + "0xfb7442ac247ae842238b3e060cd8a5798c1969e3", + "0x2462b8c0175b473234c13432ffab2f397bc5c3e2", + "0xac6eb79af129c9dc8024ca5222bb183ab8e01e01", + "0xce705918817413955da24a2a1bf3cbad9d6abd70", + "0x00000000c0293c8ca34dac9bcc0f953532d34e4d", + "0x32be343b94f860124dc4fee278fdcbd38c102d88", + "0xaee71ef73f9c04e98f5c2a1abb7087ea5abce78c", + "0xb772893ca296626546363dc02ad2d6a9adf4482e", + "0xdad30f229cf0b9e094695e1c95a87969609d9955", + "0x2494ee4aee899f0f813a36b163ef62881686b777", + 
"0xfb77a964c685fb9f11ab08be08622bf7d8b61c9e", + "0xe6423dca37a37b438edfd053bdf0ab1b62cc1dd4", + "0xd60d353610d9a5ca478769d371b53cefaa7b6e4c", + "0x8db0071675cdb20bca105009a0c7e6d316626123", + "0x655d7989dbf22869181a95aafb94f42aac431dd4", + "0x0ad3227eb47597b566ec138b3afd78cfea752de5", + "0x498ed67594a93cb2b5bfc487b030dafa90996906", + "0xe90a880501c9b7a6e43b07f0dde712df7eaad0ac", + "0xe839d7c4d76b70ef80ac0927bf4248c3bdf236d3", + "0x004075e4d4b1ce6c48c81cc940e2bad24b489e64", + "0x1edfa18469285c1de8a307f6c2a231287924caa2", + "0x4265cfa8a6b1d941531f6d0acf4774c5b7b7eb37", + "0xb84437f8e7bc8300418abc76ef7d858281f6d314", + "0xe8cca7f750d4f446e7b3f6e365247dc401d95e47", + "0x09a99acdc74f74da4832a4ea7db28cb872a19b9a", + "0x574577eee9a8402f43019d38f865c3931e48d0df", + "0x5f0bfe2ac3c0a34eca548d377315c2d3fe60c84f", + "0x4a4330fa9e4e343a5560e0aecef290b4a9f42f44", + "0x7da47ca0de0797ffedfea9194cac9a8a5d0cd0cc", + "0x8fe869535e0af7366d845a641e3feda460c2861f", + "0xfbb1b73c4f0bda4f67dca266ce6ef42f520fbb98", + "0x89d24a6b4ccb1b6faa2625fe562bdd9a23260359", + "0x99163ad81a1613634e1cafc30bb1bc83e6e35716", + "0xa34a8fb76c137eb016e8adaecf892f74629faa62", + "0x060061095f16b1110039f14fc78cde263ca018a8", + "0x464ebe77c293e473b48cfe96ddcf88fcf7bfdac0", + "0x669fa9e30916e0c244849d4266560fdd6feeeee2", + "0x880aa7cbf18e42fa185de3a82ad6308a86e2acc1", + "0xa372c79e415e00f63348c322227364f8f07347ea", + "0xdcd6968c5e40a6b26cabca51e818b0404082c156", + "0x0d0707963952f2fba59dd06f2b425ace40b492fe", + "0x1ce7ae555139c5ef5a57cc8d814a867ee6ee33d8", + "0x251ac92106d0181dbf4c80c8441bf0d0c4ce0f07", + "0x419e84b3fe15b4e414db4662ce8df93a87bfccce", + "0xf64edd94558ca8b3a0e3b362e20bb13ff52ea513", + "0xb64ef51c888972c908cfacf59b47c1afbc0ab8ac", + "0xf3dc28037d87942433a6f8b1e5c34070604c78df", + "0xb4eb12ae75c9aa2f88faca82e49f285adc8b6d8d", + "0x8fb842ccc4563ee276c9970effcb67e7bc1ba5fe", + "0xf6d865332fa044e23aec4ab815edab957bfbf8e4", + "0x05f51aab068caa6ab7eeb672f88c180f67f17ec7", + 
"0xc179fbddc946694d11185d4e15dbba5fd0adac0a", + "0xbc832776a6e9342f7ae92036e2dc76a00b9630ec", + "0x23f229174fd83b7a219024ff9d920c4c2cdccd13", + "0x500e05cc004b5197387e8ca37526c3dea79d1978", + "0x9dcdbcfbd2996e607927c189c7c98eb19aa378cd", + "0x6b9e9e10abb661b56b0602817c3f4bcd7f4d32c2", + "0x88a29ce92821e9aa9d5d5b01b5a011a4ab004b84", + "0x8d4a36eeadb278ac6cda5b87cd38577fa00db043", + "0xb8eb7073716bcc6beed4fce7cda2e64da8ef8bd3", + "0x607a5c47978e2eb6d59c6c6f51bc0bf411f4b85a", + "0x77c23e39cdacba3f1e81314e164358fc8ad50ea6", + "0xbd168cbf9d3a375b38dc51a202b5e8a4e52069ed", + "0x739f745731c58ced32e0cd528c8a48332e612c2f", + "0x96bc2bc24e3886550e02b0199c07ef9dcb92f36f", + "0xbc73017522d1603ce5a460f26f45db94d7740247", + "0x14c03c8a88c22a57e281f7890919982a2ae1bb1a", + "0x5ce46f6fab9da1fb93edf4fa34d98e2d49a62e2d", + "0x68d7dacb9d43f1f12c33c58ad7aea54011accde9", + "0x1e466748604517c88dfd6c0fb2c4977fed7cf6cf", + "0x3ae568669be648088f6f705bd8ea5d001154584b", + "0x3f98f0697eefef581220500ad6b3bf11296056ee", + "0x90cbab41b057b4e0c2b53f8ebce73d7d9503dcfd", + "0xdd4950f977ee28d2c132f1353d1595035db444ee", + "0xa6a7d616dbbb6bf5343b37577aa5c319ef33e311", + "0x01fb668f734e272ac4ed4bdd2ae6ff0e0210f9d8", + "0x1bae8963cfc7df0d18c783175bbde7e51de03e5b", + "0x601b50be525533e7a5a8958f8176aee5798e3106", + "0xa82afb67d3882646b36ef8a3fdc22e974f363304", + "0xdadaff149b7391ff2f0b3f04480bf24d6c611b6d", + "0xde503e256ae6fa00b32988ce4b61d73f4013a456", + "0x005f5fe7c3cd6cd0b24c1eb88dec13d72b044075", + "0x5c02bfa0b2dd815de4365823edcc1272e95ab254", + "0x75e7f640bf6968b6f32c47a3cd82c3c2c9dcae68", + "0xa62142888aba8370742be823c1782d17a0389da1", + "0x30988be9d80dc6eb12c6ef8385107f85c23596e1", + "0xc14a055ba72c25d80ad6402888dfd97df1eaef98", + "0xfc26bb673d1bef79ac9ffa27bd50152dccb79c64", + "0x004bd3562a42c8a7394794849b8ff5ad71c527b2", + "0x75fcb0bb6d94e09343a85e613e491659ce619627", + "0x9137b71e493598d1c22f587fc1832b35610cf997", + "0xf14d3eac5e8080f0e3d0f03773686925db5906cf", + 
"0xfd2d91100c786fcb1f3e8103ca1880a869828b1f", + "0xa3456f00efa4dd4bda8a111560b5f6df2aad062f", + "0xd47d68944ac7c72e0c7ae633610fa43795ece37e", + "0x08a2246dcb48db6a5a9e1f6bc082752fceddd106", + "0x0bc17bf4ba1f7e981b8bc1a95e44a203f68d22df", + "0x4c7b8591c50f4ad308d07d6294f2945e074420f5" + ] } + } ] \ No newline at end of file diff --git a/integration/mainnet/debug_getModifiedAccountsByNumber/test_04.json b/integration/mainnet/debug_getModifiedAccountsByNumber/test_04.json index dc1b33bb..73b98594 100644 --- a/integration/mainnet/debug_getModifiedAccountsByNumber/test_04.json +++ b/integration/mainnet/debug_getModifiedAccountsByNumber/test_04.json @@ -1,123 +1,133 @@ [ - { - "request": { - "id": 1, - "jsonrpc": "2.0", - "method": "debug_getModifiedAccountsByNumber", - "params": [ - 6302128, - 6302130 - ] - }, + { + "test": { + "id": "debug_getModifiedAccountsByNumber_6302128_6302130", + "reference": "", + "description": "modified accounts between block 6302128 and 6302130", + "metadata": { "response": { - "id": 1, - "jsonrpc": "2.0", - "result": [ - "0x3516f261c1048ae862940695748214d1a6c98b20", - "0x58b6a8a3302369daec383334672404ee733ab239", - "0xaff69c67f5dbbdd088ccbc6d47cb9e0ea547e132", - "0xec590257cae67f06d4e92e4c60cf7141124290b8", - "0xfd2d91100c786fcb1f3e8103ca1880a869828b1f", - "0x133b1b081a02b34fd2a5500c8696e125b24d0eae", - "0x1df8cfc3f893bed0e90c995bb477ad8f1c2f957a", - "0x70aec4b9cffa7b55c0711b82dd719049d615e21d", - "0xee5470f864a7effd1b7a29dfa3bf98421b2db60e", - "0xeee28d484628d41a82d01e21d12e2e78d69920da", - "0xfdc655124042d54d6c0debd2e9ffc0515a79b64e", - "0xd83907a6412abc4e9e0023316c471f9a34f5008c", - "0x078b3c84e023b4024b9fe8e9d44790e61b5e4bc8", - "0x6a8f3f9f224faee581879c477ac7158aef730f5d", - "0x6c8dce6d842e0d9d109dc4c69f35cf8904fc4cbf", - "0x77e42674e1cc459dd116bcab3d3be01409481f9d", - "0x96f80db82d8e4636214597822a0ecbd9c47788ae", - "0xa361d098ba12c72cac9a38f7f2ed441cc20aebca", - "0xb13be8a263b1e1a6ef7b6cc0361d8662383c5670", - 
"0xd9cab683b371528cc826eb15911e026df5a042ae", - "0xeed9150f334c246ceda2bee09a7916f5f2c0e052", - "0xf5bec430576ff1b82e44ddb5a1c93f6f9d0884f3", - "0xfc624f8f58db41bdb95aedee1de3c1cf047105f1", - "0xe27578f8991887243521a5201b33dd26683cad87", - "0x4c3c13d22eb044e4396b41009410a945826fef61", - "0x8d6a2f62905f7fbd03a887736ef55f0b81915b5d", - "0x907e272d23b018a947e24aa54ce3ada7c67c5901", - "0xc7ed8919c70dd8ccf1a57c0ed75b25ceb2dd22d1", - "0xd1ceeeeee83f8bcf3bedad437202b6154e9f5405", - "0xd4aec90002204d408f667136dd02d41cebc93c11", - "0xe07b178b1d663994460f42e36d7e0b45eab715e5", - "0xfe4bb07aa6619e54f94796651258f4c50bcdab3e", - "0xfeadcbee4708960e3595d64f87bde86a5b9f9870", - "0x87026f792d09960232ca406e80c89bd35bafe566", - "0xae6814472dac803b82d4ea4588cf7af8b2b12d1d", - "0xdd0bcd9d179be54cd9ae6ff93999bc1dd6ea8ae8", - "0x6b4f1cf29e749e99a7f7dff05c2335fce3f10321", - "0x00000000c0293c8ca34dac9bcc0f953532d34e4d", - "0x06012c8cf97bead5deae237070f9587f8e7a266d", - "0x12141215a815de1864a88ea938fc41704a86a91c", - "0x267be1c1d684f78cb4f6a176c4911b741e4ffdc0", - "0x2a9847093ad514639e8cdec960b5e51686960291", - "0x429d98a3660fe4b46a2710687f2b3c33aac3beb5", - "0x59a5208b32e627891c389ebafc644145224006e8", - "0xd7e575199717fd0ba1855734613aa4a8d4735204", - "0x6fe28e54b12afd82782b05166ed2ef6395b33e9a", - "0x84bd7d6841ec04e88bf45cca62851414b7974f95", - "0xb1690c08e213a35ed9bab7b318de14420fb57d8c", - "0xba34776166d64f1d61fd3fd0c4903b3229cf99af", - "0xc12d099be31567add4e4e4d0d45691c3f58f5663", - "0xd4c85a7bb568e063e7caca065a7bec4178cde0b2", - "0xd7b9a9b2f665849c4071ad5af77d8c76aa30fb32", - "0x101f62fb0bd47814c0bcd1e2499909adac5008c0", - "0xef073141b498b93e73d7809da3f5c2b53fdb83ff", - "0x222757ef54ce72f9e409b932bd0d95949a609f91", - "0x2a331f283c141648877a385092352d386223e83a", - "0x520929c5513550159e233a086d5919afcac01f72", - "0x7600977eb9effa627d6bd0da2e5be35e11566341", - "0xb980b9c4f79c47ec459db5478ca32af6715568b4", - "0xd10ce8904f2a557c755fc89dbbb10f9c9209992a", - 
"0x46b9ad944d1059450da1163511069c718f699d31", - "0x873ec8a58cdbcb4a88daa6e3dc3d4443bbd3c442", - "0xe30a76ec9168639f09061e602924ae601d341066", - "0xedcc0058e6ac529a2081038c9ec5d129d3231dce", - "0xdcee1eb204fd0983e14bafb3ec66ca8d10614493", - "0xde1a743f4b19b81a15da3fe1cb47e106a5e3feda", - "0xee61be19de8230ea5fe4d65b937d761efe490cd8", - "0xf554715a2334a6f41285985e42c53b598cf08ee4", - "0x9931270a83ea1dca491170fb2ce486440a7edf07", - "0xbbd8173d7306b8090ebcdbb7d932d4b4e87c32fa", - "0xe75af224b7274a96a7e930ebefc959b37dbaa64c", - "0xee5bef8fb1244599af6dd64b2288bdd3f103ba84", - "0x0f5d2fb29fb7d3cfee444a200298f468908cc942", - "0x608602e78424a02d9e4ec22ed769de356d02a0ad", - "0xa52e014b3f5cc48287c2d483a3e026c32cc76e6d", - "0xadb2b42f6bd96f5c65920b9ac88619dce4166f94", - "0xd53555f487f1b0d46bc1d3a9810c7868ebaa0e2c", - "0x0000000000000000000000000000000000000001", - "0x00004242f4449d49ec9c64ad6f9385a56b2a6297", - "0x2a0c0dbecc7e4d658f48e01e3fa353f44050c208", - "0x9073528a4904416c27e89ed8f745c89a57a422f9", - "0xa7a7899d944fe658c4b0a1803bab2f490bd3849e", - "0xc2b0299685770bba5b84e8c01db783c24228b960", - "0xc5f60fa4613493931b605b6da1e9febbdeb61e16", - "0xe6e311aba24846fdd0ac3033db00b95dbf80b526", - "0xf9c93dfc2b1cc1bdd197eabde0bf9ed4accf2499", - "0x103cee8863ae6382497acee96ed0d257760bda96", - "0x3ec567229c6dd546a0ac52990c13b61705966aa7", - "0x7e13afe6f8c384ac7a04f327f5a45ef7011f7197", - "0x8e306b005773bee6ba6a6e8972bc79d766cc15c8", - "0x9018efb753f25f4f39c7da6f2acccd887d6a82a0", - "0xb2930b35844a230f00e51431acae96fe543a0347", - "0xd7bff6cece5bc4b5d4cb9475e676e87817038fc2", - "0x525816fb59585b2dd4adc27108c0dbfff4b9f06f", - "0x7f6148108a7a04b474c70e990cabf15a78bdbd84", - "0xba8875544fc74999aee429fb0c80c78dc394f217", - "0xd39f792670d5501bfbeff19bb606c8244836ff06", - "0xd7012929aff038bc99df38711b58f5adaf6e8b72", - "0x97551692c023eea6ea45835c739795caced0aa0c", - "0xde7bd306ccf1894618d30d63b5cffc4bd341039a", - "0xe1b23238764a0c076b55662cba62ca4cabd8f210", - 
"0xe74619c4e2453821895a5cd04f552572359e1f99", - "0xed9878336d5187949e4ca33359d2c47c846c9dd3", - "0xf4efe7ae788a94a8ec54495fc55c0687d22f96d0" - ] + "pathOptions": [{"@": ["result"], "^": ["SET"]}] } + } + }, + "request": { + "id": 1, + "jsonrpc": "2.0", + "method": "debug_getModifiedAccountsByNumber", + "params": [ + 6302128, + 6302130 + ] + }, + "response": { + "id": 1, + "jsonrpc": "2.0", + "result": [ + "0x3516f261c1048ae862940695748214d1a6c98b20", + "0x58b6a8a3302369daec383334672404ee733ab239", + "0xaff69c67f5dbbdd088ccbc6d47cb9e0ea547e132", + "0xec590257cae67f06d4e92e4c60cf7141124290b8", + "0xfd2d91100c786fcb1f3e8103ca1880a869828b1f", + "0x133b1b081a02b34fd2a5500c8696e125b24d0eae", + "0x1df8cfc3f893bed0e90c995bb477ad8f1c2f957a", + "0x70aec4b9cffa7b55c0711b82dd719049d615e21d", + "0xee5470f864a7effd1b7a29dfa3bf98421b2db60e", + "0xeee28d484628d41a82d01e21d12e2e78d69920da", + "0xfdc655124042d54d6c0debd2e9ffc0515a79b64e", + "0xd83907a6412abc4e9e0023316c471f9a34f5008c", + "0x078b3c84e023b4024b9fe8e9d44790e61b5e4bc8", + "0x6a8f3f9f224faee581879c477ac7158aef730f5d", + "0x6c8dce6d842e0d9d109dc4c69f35cf8904fc4cbf", + "0x77e42674e1cc459dd116bcab3d3be01409481f9d", + "0x96f80db82d8e4636214597822a0ecbd9c47788ae", + "0xa361d098ba12c72cac9a38f7f2ed441cc20aebca", + "0xb13be8a263b1e1a6ef7b6cc0361d8662383c5670", + "0xd9cab683b371528cc826eb15911e026df5a042ae", + "0xeed9150f334c246ceda2bee09a7916f5f2c0e052", + "0xf5bec430576ff1b82e44ddb5a1c93f6f9d0884f3", + "0xfc624f8f58db41bdb95aedee1de3c1cf047105f1", + "0xe27578f8991887243521a5201b33dd26683cad87", + "0x4c3c13d22eb044e4396b41009410a945826fef61", + "0x8d6a2f62905f7fbd03a887736ef55f0b81915b5d", + "0x907e272d23b018a947e24aa54ce3ada7c67c5901", + "0xc7ed8919c70dd8ccf1a57c0ed75b25ceb2dd22d1", + "0xd1ceeeeee83f8bcf3bedad437202b6154e9f5405", + "0xd4aec90002204d408f667136dd02d41cebc93c11", + "0xe07b178b1d663994460f42e36d7e0b45eab715e5", + "0xfe4bb07aa6619e54f94796651258f4c50bcdab3e", + "0xfeadcbee4708960e3595d64f87bde86a5b9f9870", + 
"0x87026f792d09960232ca406e80c89bd35bafe566", + "0xae6814472dac803b82d4ea4588cf7af8b2b12d1d", + "0xdd0bcd9d179be54cd9ae6ff93999bc1dd6ea8ae8", + "0x6b4f1cf29e749e99a7f7dff05c2335fce3f10321", + "0x00000000c0293c8ca34dac9bcc0f953532d34e4d", + "0x06012c8cf97bead5deae237070f9587f8e7a266d", + "0x12141215a815de1864a88ea938fc41704a86a91c", + "0x267be1c1d684f78cb4f6a176c4911b741e4ffdc0", + "0x2a9847093ad514639e8cdec960b5e51686960291", + "0x429d98a3660fe4b46a2710687f2b3c33aac3beb5", + "0x59a5208b32e627891c389ebafc644145224006e8", + "0xd7e575199717fd0ba1855734613aa4a8d4735204", + "0x6fe28e54b12afd82782b05166ed2ef6395b33e9a", + "0x84bd7d6841ec04e88bf45cca62851414b7974f95", + "0xb1690c08e213a35ed9bab7b318de14420fb57d8c", + "0xba34776166d64f1d61fd3fd0c4903b3229cf99af", + "0xc12d099be31567add4e4e4d0d45691c3f58f5663", + "0xd4c85a7bb568e063e7caca065a7bec4178cde0b2", + "0xd7b9a9b2f665849c4071ad5af77d8c76aa30fb32", + "0x101f62fb0bd47814c0bcd1e2499909adac5008c0", + "0xef073141b498b93e73d7809da3f5c2b53fdb83ff", + "0x222757ef54ce72f9e409b932bd0d95949a609f91", + "0x2a331f283c141648877a385092352d386223e83a", + "0x520929c5513550159e233a086d5919afcac01f72", + "0x7600977eb9effa627d6bd0da2e5be35e11566341", + "0xb980b9c4f79c47ec459db5478ca32af6715568b4", + "0xd10ce8904f2a557c755fc89dbbb10f9c9209992a", + "0x46b9ad944d1059450da1163511069c718f699d31", + "0x873ec8a58cdbcb4a88daa6e3dc3d4443bbd3c442", + "0xe30a76ec9168639f09061e602924ae601d341066", + "0xedcc0058e6ac529a2081038c9ec5d129d3231dce", + "0xdcee1eb204fd0983e14bafb3ec66ca8d10614493", + "0xde1a743f4b19b81a15da3fe1cb47e106a5e3feda", + "0xee61be19de8230ea5fe4d65b937d761efe490cd8", + "0xf554715a2334a6f41285985e42c53b598cf08ee4", + "0x9931270a83ea1dca491170fb2ce486440a7edf07", + "0xbbd8173d7306b8090ebcdbb7d932d4b4e87c32fa", + "0xe75af224b7274a96a7e930ebefc959b37dbaa64c", + "0xee5bef8fb1244599af6dd64b2288bdd3f103ba84", + "0x0f5d2fb29fb7d3cfee444a200298f468908cc942", + "0x608602e78424a02d9e4ec22ed769de356d02a0ad", + 
"0xa52e014b3f5cc48287c2d483a3e026c32cc76e6d", + "0xadb2b42f6bd96f5c65920b9ac88619dce4166f94", + "0xd53555f487f1b0d46bc1d3a9810c7868ebaa0e2c", + "0x0000000000000000000000000000000000000001", + "0x00004242f4449d49ec9c64ad6f9385a56b2a6297", + "0x2a0c0dbecc7e4d658f48e01e3fa353f44050c208", + "0x9073528a4904416c27e89ed8f745c89a57a422f9", + "0xa7a7899d944fe658c4b0a1803bab2f490bd3849e", + "0xc2b0299685770bba5b84e8c01db783c24228b960", + "0xc5f60fa4613493931b605b6da1e9febbdeb61e16", + "0xe6e311aba24846fdd0ac3033db00b95dbf80b526", + "0xf9c93dfc2b1cc1bdd197eabde0bf9ed4accf2499", + "0x103cee8863ae6382497acee96ed0d257760bda96", + "0x3ec567229c6dd546a0ac52990c13b61705966aa7", + "0x7e13afe6f8c384ac7a04f327f5a45ef7011f7197", + "0x8e306b005773bee6ba6a6e8972bc79d766cc15c8", + "0x9018efb753f25f4f39c7da6f2acccd887d6a82a0", + "0xb2930b35844a230f00e51431acae96fe543a0347", + "0xd7bff6cece5bc4b5d4cb9475e676e87817038fc2", + "0x525816fb59585b2dd4adc27108c0dbfff4b9f06f", + "0x7f6148108a7a04b474c70e990cabf15a78bdbd84", + "0xba8875544fc74999aee429fb0c80c78dc394f217", + "0xd39f792670d5501bfbeff19bb606c8244836ff06", + "0xd7012929aff038bc99df38711b58f5adaf6e8b72", + "0x97551692c023eea6ea45835c739795caced0aa0c", + "0xde7bd306ccf1894618d30d63b5cffc4bd341039a", + "0xe1b23238764a0c076b55662cba62ca4cabd8f210", + "0xe74619c4e2453821895a5cd04f552572359e1f99", + "0xed9878336d5187949e4ca33359d2c47c846c9dd3", + "0xf4efe7ae788a94a8ec54495fc55c0687d22f96d0" + ] } + } ] \ No newline at end of file diff --git a/integration/mainnet/debug_getModifiedAccountsByNumber/test_05.tar b/integration/mainnet/debug_getModifiedAccountsByNumber/test_05.tar index 47547c8ee4b6a3fa68e9c21b828b5c7f52566c0d..aecd9a57510c9c7ffab12336f396ee98e6bad79d 100644 GIT binary patch literal 36864 zcmeI*OYbGubscbx{V5t88%Dh!86^QSOfpDjhGNvyRw9uKNg)mlefRv!+9V`~M@q{`RlFeEBE;eEIUtkCwtW|LPC_<-dP*`>${A=ltE@|J~m| zKmPmg?q9vXdhUO7UElrg+aG`UxBv4${O&jB^ZnoTb6&jf{`fD$H-G*4Kp*k_^FH4{ z-(F8X>E}PbpC7L8fA!lRfA!tB`HSED;gi1{fB5Jx=kw$Lc%E;+oWK6=`fp$Uc!Mv; 
zxA!0ao4E@m(L&kr+@dKzxn0wfBA{=0u} z*!n*WK{nR+zrB3sH^bj_KaYW@{)g}W??(9Ni++22fBfe2+<$tmKMu|xZ}{a;BlL$K z`RNsYe&DZ$<)?}MKR@vwhRA6B>E_>b|2S^__zv&eZO8rWQ`gV$a!%7R%-6mQ=e>>3 z-M#nvrB=T9#wl zukpI4_Z^?%Sl)hE*5^6>*LI)nRdYY>eYdXt@$T2L%!`{%-Tl5EZ5x*9x!!$Su5zz? z?yhNl@AvMutDo=r=-kG0zkT;~P>-*jH^SJH% zdtciy-rcy2^I-JfX*v7$s(D*y&$C?HGOpL^VY*>B-KF2RaoyZ^d-rF0+N+l3Ue5V> zcW*b(TmN47aGR&|I_6=Y=JQ$vinUy|zouk5pMJXEW4rrfxz@uUjNP+$yBFP``MK7y zz1Mm8`}y6^bC|B<=(_RQt*~y|*W(_x%*j44L%C|6pKc%R29vtK=P~Tdd+hHz4(Bvq z*SS5TL45Z1Ui-RAIm2}?+wrc4mzbvhTCVY#`+Gb#Y3laDOMBJ09{YM7>urF~dl=qh z^s2+y@5i>^<20}1c^+3C`F?%vg#H-&b?Dcto9&fxI`)1s3TE)WCWBb7GSU{*zIKC$ zyzgf|_UX)8p0mHM_uRVIJho4GoZWoAYu@A-w(YvDh+%oAcRjbI>z;L349GOKSB$2K^F6-vSgv}F@9IakWqRk$67eI)=80$9 zM8r79`8kfMTy>B8vUbZnjr+Qb2Jh;}w{d^==j={fXnVHvsw>KmcEhur+iU4Q0?uk_ z_0wx#3mB`o^*sA9ZS5HDp8k2a;o3dg`}Ehd+B3q)ww=3}G73`yN?B2!Yd@!TzMpFs zH2ueU-}^LzLZe7JE(Yo~q~%`rg^)X*_hB(^*CHg1$NkRdG4Ip1_5Cn->YsDvXXKCZ zI>+r6_m+L%hi={9{@vc;8tjkZ>g~PxK9{mKeC)P7o^Z2>=k@6!@oZAz%`7uJ@87{IhlfY9ixv%*#97=iOT;?R)h<{TgP2y$N+kKeyce5Aw zM7@0(&&}3ruM#zu!#%otc#iitvlOSTAEx0RPV?1UzW$}GLnD3N&nsX&-llu*`?y3o z!?R4CVUnlL)kL@VlI@0T>bt9xsaA_*vN6uXC@kH(9Oq-bT%NaFb#1RbWxhK5MVuer zeR=kIm7yij`IhkxS-QQddyf6;)@it8 zZyQ6oUM84q%x&G~M~=9rqxaa>#y-lV?HJCp2sMt!dYQI)S%-7!JArGS4kLGt`(b3a ziZ`U3Wgqusg8o_ed9Vdd_%uD^y57gV4Bhnfds$KUa7nC({B7}%^YRSC-CvT-u!z;& zJB|G%B$TzWNCzjl*4?(J_lBEns;9U42iaK$IEUM^Zr@9|SjAbNzeuhZboPwr^}Jq4 zE}X_~>2Cv7@|Qe*Pa?*3_Oi&aZi{=ak9}={j+WT?E`e|Dy>^R6YTYf_XIbWy^~bE$qYasME5>I=q5pri&!_mGEUds=Tx3>1iOre@z|aPEHu-b z8*W{&o`)FsZj_wM$Oqx}&vFb<#W0SqQ4w{w*NiQ_^mPw?kZHk(^FAbn_*@vWJ7??G z#_2}0ZHa8}l%*~1wePjU1z_WQo$oSwhhUp~mmo*4cFG-K_p*%Ty&#(LbWy))vU-x!dG12O^loLA`(vJ$v75H;Ix@>QG<81`>|kKCB!!<9&}p9#C^`DxU~l(e z!7B>7pbq8Vx!kMTKtMhAn$4fy?&-H{hJH5>bV?HlfP1!mCKuk*2Bj}0r@Z6xj#AAi zx=3@PZ8w4OCb###i;4gOk;B?2glxtx$rsMpWZPN(*v1|6!>n878B`}w8zbz8R{)&i0cT7S=8AEvuGKSte8KK*3!RguNZl4rnPw&>!guq>L z<+<#)U}7TuBsc`5faTWJI!yjR+M59Iv*fQ}(fx)?ItU?{$+jAM=;*Y;0l4>-euuNH 
zLuJ>p!3djp=6ChzYPVTet35ZofQ4l4uI*?*1-tCCA8+0sdN4fp{Ca|~Hm5w^DVoNz z4wDMT9?BTu-RT}LG2@U~Qv=`2k!=F7oMj9b5e%uOHlLSs^>+OL!=57Xb6z7TFb?bd z^yR&d#|qsObZ_aeH?t2srusWn75hB)!@b%9<-O+a9fTnT!U(zAv{tKElr3fB{W5}T zXeqZH4GH?v#D{XU@b9F@Mi=^Onz z)F17=hD~5OW)EUsJYNT)17h8+#I#4P9BV*f*ug5s;~7yKrWPxv(Nj(5k3DZ)3v5JI$SFE9ja!1?nhZ z`K`u7)2Kig6?`dXQ@LJBi1~b#^vG2V!eW2Tr9#9UT-~Ow3l3DR`uNpV$0ii^N9s9{ z%9ZVVi5s^B(j5wI1)+R$i#dRakGVRl%}ZYszVBx`3fp6hp+YD?9-hMW$PB%p@4{gc zQ-)sP+iUIc?DDVJrZCBF$1f#N8%PP`U55_nn}(ytjY*n9d_0T#V!s!#?}j}F3vrNE zOqjgh&0QJ!Rejp;R+_ag6`L*ugV1Qc#>t`)IOYK<)xOt(5;j;s;UFcz{sJksDf{MJ zOyhctGT&U{Jbb-lTKsfxr<}GvtaylWfWzdCAB0JYkoI2da6MrDw1_n?Y$(QrJb*;K zn9q4sR%2h|A@GL|*@RJVH&0OID{NUHcp|()Z(&c$*pkh0Y*Gjy4H;~V8i0B z`-13$8wN3`%rf|d<$03#5Id(8q$~skz~4*I5u|P$TDeMuekE4);v9_k3LD0JJHZCP z7HIA|TYM-{G#C_%w7MD665O`#kwxosDK7hQK1B_?xx~X^5L)pX(|SONXxk|nR^hA; z8NlRWl>ks3?Ryyp;Qd`-@Lr*7u7GpJ;w$*xCyK69p?6DJ8=HqaJG}6`4wmW+;6@a` zEga?}bxzqS!xp0NR`*i<#75=p31T1WZq>oIdp-s0{uSPk?2+^~5oiPOUi~4fpZHE- z`H+2gNw;PPe+dwGmXqqqrkWwJcwqXXp%8LsRUbxinH7#pu|!Of$Mxro1*1F3}gBNVF- z+R-qB-ha2q?T+3S+%gapF>_=wDQu1#xm@a8`3Hc>DGVk4P?<;)VEUWTea$RU@<_vX)LIGJF%8v(xO=|*Ol$kb!C|l=Cw1#cfwy^nTsRE{Js+IvBa}(PI-~JWe zK$~us2fe|C7Wxw`-~vk;cA)&CV-#Q|9$wP&fmb?#92G9LK>8APhEUtP3R5~XcV!F> zpIogNoKEA7#ZrlbV^XsF%)=~BJqM<_oiCj7_yAA{(};1eP>6utXoDG5s)r0sOfGvF z4}|(eB0`)#6LNS*=|BY_^-CT09WkNSU$CQnYXGEdJWV)lMOTO5n5-=|g2J*V#I{ce zH<+#KR5m~WQ65%H%l5foq@DyR4?UeOOX9VUiD@3Wo6CFo z0rIV3Bz8>j`Arg3JWsM0`c>4%qrIfa_6c!iRyl45Kf5W^QWd%Bg&9ToKHxe*=B?~^ z4>ICf#9c+~Y}C9Spzdu5&Rtfr+%i99FT!PMidIwk}Qv^W}E!RrdR*sF$kbSGrsPDfx2qf-fW%!FePWwO845 z>P7*V_6$toGm&nc_`Q)Z@p;IeN2OHI*GY3#2w>CHfeHzwvh|EZQNbbivro_74r}if zg;s9di9~~S5u(wq9}%3vvG!wT-KFhSk5%Iq5jcQVBY{$#cDe#Y07iEd)AgUx+BVhC zM!v#t>}N0RBeaR%79l(z0St+R2ygE-2RB{p76@kR389?A1a)wOZOS6W$a#t3?N#;< z+1aC`v4_>VL%3(%6Le6EH~QSlKr@%Ccp5AwQ%SLelf^Q;n<$!}-)RZ5rNpE>?_Qt#e}s2kHHyrz`8CSc}6aQBkTimdpjD&XXbdh zH|0r3GC~orR#2yQ1rEK`x^^#5OS$T$>s#g!vsVPU!UY_QD&Gn_xmDiq-!&5@4-ljj 
zk*CCh{}Wc3HhML~Ap4zQg_o-wESi-JO=bvaNUhuj4@%$gB22mii}2r2_Sd0)XT*XJ zcOf5Vh@4`S;bTyj8Aos`JFbjjmx(k+EL7fsYXIXTaha<_#y}iRA~0y$jt2JrR1cxR zaAiQDXpM%jCNGN^i3wW)_?7RKxgjRln!0u{Cjluk1obk?Z068XJJ?CudnrQ962<48 z57>bgC63V^B=T9P5%S+jYAca#_1BzQG(g56wT%65z{N(J3I=x7r8*c3swA95ACn=} z@DnJ+ZPX)HLg6Uzsnq)#=COT30Pn`N-`FIz@+CVwI{-qYP&0R(q7n~gceYnODE&>o zgKt63L6R86;F0CXs!nw^fPnCMdlkvqP(yX-zgviyH`6DZs5m$gko{|n4mH|PG%Rvf z2U*XC_PHPf5;|m!mEl^sO2BIGwb@Icl8S+3XfMNv$Xf-zSq|TWQ6I9Gn5q(2$q@S~$NX}yDO0y5i8`-Fr~(@f+5 z9&m{-G8A`Sl|y4CM>sCK|LExFY_=TvE3=lEtjZtsI(guX0q`SWgX(MZS_s1 z0BQ{foQD}e=pnwb6hNG%3xJi;7)IriQ1A80yEZbn2$>jDt~7)xlyn%zwZvYmt%6B_ z7sKQfECcQ&Vli43t1HVYnXn4&dm(HD3p-bsR#~2sbI539KoSLFn>+$>mN7(A0`jgI zY`02)c7TCNI_4$0GAOborg)XlVQv-$GA`PjAJc{+T?#o_%Kpghaq%Pgn%`Z7&n3V zHXkw)yDl*|b#W|^eaU)w){(8u088QuzY}^DJOMcziFJ(S$LMTjSAr+UJzAsufP(aupOUZ+vN568j1Sybj*_@EvQX`69 zP~dk+rNTnQ0Y??9d3l2hXdoaOsZlV%Pf3wxf;wcS(8ch2e4XtRe(Zxm4iu3AwstZ6 z%MY~o!T}J=M4QiQuaf+gOeV~XwnhTkuxOFx$1hUUilP+9P5Zltxp_O zs7ZLa1@D85`B zvVg68>U3~uhg#KMg`1(P#cxG>WhM|@s0Pv}UQ0OeN8lznQm#U{Dme*5ig30?)RW4W zivb6~youemV|k3)tH5whB^g9iQtN?Veo`F~aBla!2a_7*tyVX9ZjlqQz$SJGZZ>YP zAGhf(U&!(y?D`UI$Us1dTHIfoWiXaDN@3!%u+{>~xOl+Z_6e~sgF!=B-%1&msdXeI z(7{`h5Xnm!{1UM(!bHH>ljYMuFo}UzW)!^m3UEpwr~0aXV~V%3oA-{A`dt zM93e+9wnuFcEZ3jtJ>Wtqsbg>Vpbz|C5>psa9s$N{w=E^fJ; z2=YisHc84%RhLvl)?grL+_ftfOx=PHK^6n+W(#EYdzoc(58(CC0XcjjQBli9fut4lxx1_2^ zX@G-g{_V=hD{Z3LV3U~+6FLaWj8v&?#9`srbJ}e+r+{>rFZ?&LRV#=*%`R`5@YAN~ zQ`du-ONoaPzcpxeOfUnRi28`&*yDNd0}25m1fu0DM1*SL?dsTSctur6NOSQ$JR7l? 
zw2O2`td@9ahN)tiAQcpdCzZU^2Kyjs^nvifgPNOS zicLNRds&BQ7Gi5U3D8I#P7__O5vF6O5-IO+hlHu>7qxpj$k1P4L%<O#)|2{-Gbg~fjZYXq0Juzy ziuGQhcL8whqpOwalz&kt6tEi$?=TkQ zD)88jVR+y$?F-sOSdUsM_;L=(g<#kq@PjHxiH8ga%I|24T7(9>%j}_e<#wquT4IE1 ziJ-j~U7q^1aC{B1JV=r_I{r$uG^7~|AI?z)(_ZCoG9eNlp#W6Tz!9SaXFGN-eOyXg zMO}F>L>#ay*I{5JbTKv4E-(UL)@7VDNWt0xn$9w6Evh|fIZZR%@4|k1^>`+UPH-jC zy?sL2*Ui+DJfeD7MgS8SUw`7To@uWVHc#(?4$$ck=!%{;JwP`s$xcPd_Ns4* z0J??TLnWhj;BS!bv|4&Tv_?!nYQxw+YQFg2udG>~ z5RUG>22LGp#<n< zorq_F4(xLnLNkj7N1!y#!7D3}%z?-HDgwY>Lg0>?J7gUscGPpkb+#Eh<4hp6>PsbM z7jZ>E(MjYOE%c~;k469#Bf!gO5jks|w``3TdDK&?ka>X=AzGI>@RWuV(;Y^!YQFS{ zm|!JX$e7H}V2AuB#n9T~(u++t#B94ZxF%FVJ(SCHi5U`h!V^o&)P2%B^A-iPcHT)* z3OeemoE@tXwwJEy3ANP4wXAcBq*fyvM-K}Z_XKwSgH=KnLsFrA0K>=axJ@IsH5CF~BkZK2zQPu6;k+lXPrx-XcG_}`IMny;<9Ye9wVu@^WDApMO zUj)RIHc}k!0Ea^O;WD8+VIcOhD`V5^+0m#H$&*1|;5^}lQ`gj;5dNoKy`5#q@!$m# zrt)rdP^2yCwWAMN>&kp@2kI2`C1_b@XnfhxTgT+ddaoVUfD-TM5Fol4Y41gz1=66@ zw5q3xNY}TrMo3N9U=9aJaDEF)VV#H^YRQX=)GpPaO(jGcIdO=rXK>6(doKVo83nbh z*22)GJ_PBN3-R{x(5q}rXt~$Hvo6I6e}V-NB<3@?c`0PBazjQVXTbXHy{s6SC`^~W zCW2SwI)hsfR)F=rU_KEw~-+m zuq~35jjZ#7d{K}dehc-2BY@gbs9uxM0#XgRCW5U8Oj&(Mg950HD*cxlgwK*|oRdV8oQUISR1PAr;IAqdjm3$dk6RsAXjSr-H(44+W} ztk&3QC2E+gdX^LxJV!J|;(r z__U4VOI*e%S#qU)s$a}03%U=;+P0ZBs0oQe;x4zP<#$9E$@C!r#GLqlqc(}RaRQ1;dSN@atTWq;^6Va-AHD6uDm zC)mu1#aZiPa0~5TK7Vo}bFpE9DwRuW)Dq|I39_6-qO3!~rV$9u z3^&ul9!E3?sw4qxH$A(H00ZHe7?t8 z2sPB~s7(ZTy{2hC$9-WO*dBQ~N+tsSZDtjE2I4^j@Wc~kfL-WIt9QAvNMjcxR%jimL+|;L%iq$hoO^4I5Jet%a(Y_FjTTh$~Q6I<_mMNiT%0$$=q+K2eW}e=2K(qyx@x zsy%I{v7(`HRrYKhj8Z}>eG!jUBQ|6X(;==RKnnScw`g~KLO`GO%fWZ)lnU6Rcj$ zhoZrHx9sdnXfJMH7J|%0&F0XEF^htLWCV){*j&V1k*E|Z4pgkpmTub77`e59VZ&g5 zg0Nt$AIJWx1d@4y=;$M5Mahdw5pE(1QRHkMCC8l%0GWkGeKESmaxGN=Oal=yC816o z76CU!J65n`sTl*I?j6}*z4}~>L21!?6{AEwc4BrkdZ<2QY8VkC3SF6HE<@aD%2zB@ z@<2~3wYBs*sR%$`zBrnF84biFL+i}Yx>IUE*<0~1H}DbQybNe;s$R4mL*jrF)6%^l zi1`Q+#1Nvw=;l5z&7(-Q>YP#ZM7I-{G5d5!u{)-qkesSO^?EdW8LquoZ5bDSK%3OE%0NX0HK+~NI&tL-c%L#7sh%EWiIz3V{EZfq$2{Z 
z!`rH7gpPD`t3&XKq(wNhy_fxyeIK%1AsFWYJcVwOj0C+%DR;z{k{wJixl-At=97og zoPk^O(*Y?VkWxHaFYUb?8JA;4@V!dez)qNhkqt{l_7KXsc7V*Gyq7X6MFo_snoN7Y zH+>qwf^9125W}%z3L(YG9&1^A z<>D%igF!>)v+>aUWkrEV7QNn5`9Z%vR3}u>Ix+l?X0WbvI+LW zNJcx@FM*&6w~$Vl!ZPv+*EXM*PUEeJo0X^ml@_AqMQaCMUR5yEf_#}M=jX79#kse zm9b2`3+pbUJBA=lNz8Zcy~rWL*NQ{ol*R9Hwdt_HG}%?@H#1$T(Y4%5mx<2YoUDW@ z&~zIeBy@s;o>*)ma#rD2o&MrxT5b{{_)~X^k=oLBMxh$ND8i@_1x)*d+W9{o63~%D zMRTijJ=~V)EVyzlC^Y>@?XBQTh3EsD4uvPNOXAeNflSdbm5}G}kkLQYo%N2KJpy>IewV_;jq&dA*oMa<&Ixu8bktDMn4a zsiM`6mc}(Rs&q_%z!S_rc?W%yRw@2SRkK3OMNR-HPsrzF>)M;n zETBXw^&)oxai}^s<>HcHqc;)nE!`^fU-3DDdDEIAjUI=DDlys#kQ zr4e;Gfe6tQJAxNNBl^G-UsvKG0bf7^qsLfAWfHrcBU3WWp*4n7EE4|@Q$hT7$HOLwd685(BdW&YI%wjbl1eTDeM!_ZCPi8_FyL z3s#D&_%HPF!1K5SQ&F)VWfR+_yEm*J+8|;{X9$T*>lZ!6Lf8wuM zS~LcS$Y0zu@=SX#MWfOZ)y}b$NbN^q6UwG7Dxi^`a)x5v%h6wQie-Yff&+}uupll1 z_%!>(Y%(b$SbHzxx&qe=(0Ofn-Ainn7&^S6Bt)urz6g+2@Wv3$M0c~mUD0S0Y|kP%bb*Gk#UF$_F5Lg5opk=su|`)5J)dpPGuD5gG!}n zkL=nIzF`4s{sPt=`ffwoCb*)UvB!bXQ8zhZ&Q`NlAy~9DNm|W@8|7MPuZo_7lr)<}1a%NHg#TD} z(d$D}QHW5f0JQchN7^uiRL`=VOf>(RWFR4@@ZK>K3OI9Zq5`p z)|6CNaP0a@B51F2_H=pLVVyjQmX(4ey1R^Q9V5n$-TqO>%NiKiwB za|k&_623?@RF9N<*;DqNLXGR#DWrr|3`8hVhZ3l{a}MJt{(Wtpz3q~CZ{^`)8D24w${r=DK#>Llk!uFltMIfSn}2qqR+&mrm9z;Fq*|3Ng8}eE1k_H zAYv>G6c!c?#QPWCCISp;G(ADwD{0RmD`AJZ+Tw=SoMshkq;20zly<@*OJ-8Jc!&s4 zcc`R@QfzLHJ_HhA?)EAYit{3XNpG#6Hda|$$ z-`F(_h#o{MzyR?`)m`~%>j_7bgk*H*l)&po044)5hy1kw31a7%f7S%8-pyPVQAZF-;1;!{LX;oN^1c8 z2mO8Bf?c54hnZzCXPmZIg)Gtg%~=yC-=_}tEeJSBfXw)7jP-9;Vtdu+sM*g60EXh@ z$)dePgHa8U>0?;{MdlJWP;cH_5w5t57^M=DvjjKNOnM~{+i_x)h^?yjb4o26TG+tD z(5O)IACm<&+KDLa6ZO4VNo8&XSyUASnLcSos(h;kD=4R{V^jm8YhHGsOpdvywdo#m zgsYi=FZq4&cGw$}uH3Gtl7c~?iLIcUicchsH;Q1cF|1?UGfZGKr;1>6wxVYWE=m)w zK?dORZ5ODAbh+XQ`4GPH1jb5D;55k&)r|8V&mZR6&@*y}dZxs@WmiHXu~hl%BTbyJ zkuGooDf(58DGT9; z5J`W$*R&25h{P;MSBX^ZibCioT@wv+cnhN5`AO0vHBj-l;71Jx8%JG-F&A>K(D0bB zlq)&XQSQrE4k$-eze#@836IGjov7y_*&LFaYFW_shEj6cgmQq?S84_OKUNH~RCFj= z0BaSO^D0~ph$+9hr(~qEzx+9Y0G1JL;{7r*AgKhO^AFG?4#8$5tr0tAn&8Iqp+J@w 
zPZWh(FXy1Ai^kCs*j`70wPPq$NV_rhI$c42O_gU~X|~Yzp@p31QtemIX7L85GbakD zF@jei6zV!!AjT91Q$mXpqO1*Z&k>_6!&p@kskLKo8&6F$A|%6EHn>M^pOA5a@+!+S ziKEoW#bH!rc6(Z~M6~5BpQ_%nTZUK`wW)gwZf(rGa4|nRV&KuRf>{mu(B8{|SWZsP z+2uOm$nAI(1?8_5T7qT;H$t$>7BGr7<99@Fm;uw;Ft;RBA}r+!GORpjc!&F<0DN$+^~BprzbV$ z#8?%t5}ELr0GB8O{N~s(dkb)MvRfH>p&iA^xh241s0Iygyr@iC^ zDM;So3fLkVo9FUUXk$(lsUfFdmNieD6m@gtzMAv1;L~u6e9iYNMHMv3)^mxyA|Q5v zG>$J6Yjbpx2roC8_;f*=VM^0e-pix}4dT3DAlBE^CkRCxB}7Iwg2f`>l_&}sa!zSO ziaTMT_zEQ2&WKV_l{hSdNAxIF8&S<$I1NLl^q0=;jYrpd*BFl!kyw%)DvnaP@(|&K zm^J;pQi#1Y)?7XWDU*8(t^nKHS;n(Lm7h}>@CC8ba$#WJu_+dO&ae+Ch@2b`r0_xWjDCE+UpYT{%6#dX$?_aAE{ak4juAk|ViJdA?NKw9XcJjU zQ9^i8Yeb6$H*#K&U(&d%87>^9R(1+#t8laNu=ot@iJ-Qxp*C4uh5!=-4wA7Q4UTVF zKuCgJKSy@v9HAgkibIY-Ve3*>e${OXFW?lM3ap%Gp)g6ray$oJ$x6-%Xjb-B<||*! zhEB;gPp6&0Lq$nTkbVJDr#e(%0h-z;bP$Fd4@vQ;LmeVCs+%qDbC!{J2J0cWdYYEH zJpCIOgq)WM(!;V=5fJC3Tan-JbTqJnQk26myICR8ioSA%L?)W0y5zLZ7yQ|KRiV?A zFERMY1V%g~PrQOifrxfwb;!9(kv_|=vuf{Fzz`CVN0f&Yld@wvf6IJX@jUl&pJ8=AB&gii*XDWzbS? zM}ujDzGiH3UdmZK9n*eJmD4aydGZhcB4>QL3X7!(v=h}q!kp@Zoxs;TCHAOuw5SQf zbu=idWEWc*PLQ`4s|QjREt`}kZsZFlC6J1~tRuWqzHBgzAG2t5!By8) zyfM1N-?UHYPB~@6!#T>G(Ln{Ahm$+s zIIT<2M~=ost+ZEx0Lp3wniPz7fiFQn=-Hu-HX{xeJY}o0;0Gf13+0^PG zLaK#fL)xY>|F-Xi1yF=)i6D%y<|5269AB)1d-T*Z4t>^;h}QPX?J*vAjTT!_0=FA*=?vNSyjB( z08YNE#2O?l!05ac<-PocO$-HE7$8HMgTyIe+`M^8BciOe2io_Fm7L*Ala=O3G0BdW zmOiaLo-$wF$_>v!*xq86^ z;z*HG92#LEbLn#N0{QJ#mV$$ zG)gKWu;W~aByOV6QOPljKm$UnD{-DMMhZ1KHihOqrWV*wcyl5NM#`vzz8Y%Fd?9LM?-B=r zJMIePRESLo*^-RYK@d|Oly#_%$Hs~8!k7yc3R+b#fkswOvqYvS7VAo_Cr!o*$Zc#X zC>E4p=^sloti)~=OVeOTc`sO5g-d4_H$(#0Sy&=3hLaz}gHwtlwzgv^powcNc6tH; zL&?(kX+Y;c${F`OXWU5i?Di@!nUu_I$JykO@Opx677_~L^Tlu=EWWQq?2qfJ7tG|- z=p1H8J;MkGt9(o%I7lYT5=E8&n!|upY*Nyqow9$5SGpXP}NEdLdjic$cZL$u&uYvMgiB%BK}RPh?UtvGIFyzI#(c!L@GD4PW3M+t z6Hv>EYDqy!sG-djEXI(r4-E#!w)fHp6@zEmVfhTG#u66tG%i2wzdA{K2s=g~NGW#1hOVDpyH+w3iij*Mty<(xo1oqUr|JzbCy z9azg(RfiYKS<~_B3mSZmfHPyyC#ILULAX<0;5P&#???gTto zsP)G3U^T^-gT}bHkFw0!k=|$KOg55oFMH5V2#i6%@&d(8C}u_JqX9_H(0NqXb~Nxm 
zte8%w-E?C21a{g!no zsHe5T!1HgwzH${!CExdKuc#9}r}Q?KwL$oknGp=sT_}vpQs$CQ5Pj5ahVy_O!_mH% zZ?26|Ml-9UXB8$0#y|#Ve~OdpMi+YiIvN_afJp%}W=O;Ytr4%CLoTg83Q>q4AiG2x z$7JMl)1#!J=sp4=U29s2?-^i>U06dp|sNh3;jjT~HkS>+wj4~Q69tvR$n6BOe7_mqN8cVPR$;W1 zwZZ%p*$Fvj+(UFly2VPt(6E&i5^zN-Y@bjWl`oVt`b?_fC;-#XVV^2jx)-=%%Q5@4 zS3%Zt99wzp(_&|K0I#)%B)5<^(0SHlJtK&HP5Yc$*)HVS%>fi{s*6rYMdYgM!zorR z+i?OpV~XCx7Kp9X+n~N@=hghlJwEJn_W)lT=*?oZJKX4K=M`K?*8Q2c;` zrO@`h*o(r=<**F3y@pw!lEGTKaC)*cm~lS(K)36;3L!wrZr!_o z&d&7z?N#9;`LrR@y{(_2Spn=ueUJ%Ho5YzzI=ZD^7H%e{qOJp>X*Ox%dfwT Pz+Xn-FC*~(#R&W_F_l~d literal 14139 zcmV-BH^j(7T4*^jL0KkKSppD)MgR-*|Ap_=0DxEp|MGA{R0{w1{%QaL2mmMuU_yT% zed;|AJ!6Qf3)pR&XLh01qh(gx8Et~gYi!wy2xY5aHq#4IW-YZu+Rd%ATCA-akyJL) zgK4b7ZCgz?Z8pscwUAXRYepKy4QvgzhS&y8Yi(`j*IQK5fwsxAz^i7}l`K>(tjemj zY^t@XR*h=f)=JfBQne;+vl`a6uH161YBMI=W|w$HPtyRJVx=m2fB*mh00040`~VY5 zQcwT@000009Ka?5WMBkhVqq|tA*Lpn5mf&GMiQz(00000000qF@@Udvg*>N}@TQtH zXaEmT01X02f(=bQ6*iL%H2}~apwXHUiKdO}o4+mHsq-HP1RpCBNgCyAUbvrJan}|2 zYg*uspMZdbL^eQn7#RgN_;`7;9CU1(fPk9`2_(%s5C9_f07yw{06>pog9oYHw?#uf zlQ$-;1IBi!VCrXdcIvofjo={8x-WJ|UYQY>4}IywM2 zqjAcd6OcI%*%A>dknAJE$Ye-%k7uP9X&buZXsvw;vEgFGW0TXhk3j71LZ<_x&O~sE zuLf5nRw#tAr~EE$%lZ7hdqe zhYvv%$;ucJ6^QaePq<(=rJ>DuBeF_IYe)!FUI|P3W@?L-TnSrHE?NQSC=jh=jH%>a z1XoG)9;Z-~T=<%Op32Ls+2jsOWc4@{5j#XvJwZf~iF52~n>bZk?-#UG)yCz;18mLq z+@gfbs%o6#)+C%lO-$A)CB2f-0A^&IIn$SDrgdJ?drT%vGqX*z!V}`iz9LOPg_UrU z5rb%z8tCbbC1fp)S@c1!!gfZELduWIl1U)nAY8lq+bMg6$~K-ln-BmZ9y@qWEn7_% zT@lGLM1tGdb(GyT#d8wVY%g;j(=?J4Kw&^UE>X^gw>An~b~+X{9VMjQ@1EuznV7l8 zo!i&IzEsAoly{>%XCnJu0!#d!dpSK<=5&^z)|EStk&YwzQo|b6_wq{W_K-&rq#SzG zeoVS{t6DLCs0HQngg2|5`gggg(K~>cdKzp$beVpjtcz&Ts?Q^Y77}LT8DS{5z;5!b zrUWIUyoyedihWrd_?N^iHpm_dc2~`LGb!J@3{idd@_}`E9Ek?gG$vtC89u8m>4rot zh3GKaTOK8=PZ^@5_qH3P*a@U`w8hjPVcj$lTVh0cR9d*E^M5*Cqn3UFw!RpRqGleiyFP4(T&KWX4!PSVGI@yh5xo{Hj=NoA8NCEt#frWg z%rW8vIEbD;Q)O!4V=uJtE#}@k6wm z%SgwdZ3N5*RPcEai!x_k8vq zSq5fmRLcA@wsOlGFRbu)o))zKKs$~fnqQNBRS%{pn%E7z`eX0Q@%8@23Wi+Gmc+z$@4oLorNf%1a*5`iGsizhR*_ 
zUDL~P7Yjaq`*V<76ACxUGQtRs3zuzYg8YoU3m`t3#6iP()=)4u!=lNSV0S z!9yVtwD*;3vwj&&z7yxg-(1gT+>ojs)5lPgSUKSbQX7P*2#ExEr}+NwZH(VV3d zOB`=Y9Iin`VpP4wk1L+YNoS8GNi!k)Hf8S#LKLW2I}yok9ESz)AVj7 z*iI{N$UX|5Hte6!-uK+0bK#oP+O`K?1&xH{?BoTn>!j%wdH}csNlT)2q;3OZNo~6tUmz4Nb7OEcN zco5N{ch6|BL7Ux)7-zuIIptgM`atTdUDM@lJ8@LXgT|<=s$w{}z6%19K2Cf&QRTG4 z>hi-q5jdrs|c&_1l-vzbIy@_XHP6kq(NNB>d5v~c6`zXp|dgOWvW}*w6btEa&=q< z9IW)=OqY;5&I*ESm=H?X{FW0pN6(fqw{t3>+Wl-nZy&Fk2gwn3jIi;lGv4$oY7-5C z=NdtHZ8vk;>a@xW_d)124cV;-fTTUUf_I}*(yfqcY?j~1@(x&eOp6d{8#d30lELo#zXaACZ z^B8v0^f#R*)*hHDJLtMo?>wS^O2#USfFbuq#LD&Jl;3G}^PJ8z$HmcU0-~dAF3cLj zVz?tTIUHlkK`eoCuR}~8H0__PSNf))utkj9?JT#!1C;7 z_3Ijzy)D+iRddg)k05B0GPqYG%(_4%P}k{rQX6YkkG{|&GitH#J?3y8kmrM-Rx1|L z58Vti%eJs^l}9cv0pA46M_b z5qKp)JomenX*wRLZkq$1V5OeG452<9?x^onPF;Q95j;LN8YM4A0T;~yI)FeubtdIW z{L|$2>aaaxI+A>xM92l}B=u>`Ems#-d*TBXbSSJqS8i0X@XcmMYcts`*g$)lJP;V3 zXNrr4z8bLInsQ4QXO{0`y0x5_=I<38_H0$=%&@CRVicQ-NJ{`yaBQ zdr}ZtuQGZl@4qMtXVTBTlaU>V)h&DJHVes@p`l)B^U1dh-f}D5^p{<9@<{q3sdVM) zh$4x?c|0QrFtE3q%d~X9)i5t!Oyz2XlUy-9e=YEaN0CRV=Fd6BpUl#9cr-Qr2qfjtJAG_3Y7 z88+-{gMpyGh&q_M@lFTpB3PIghOTh*X|DBw?%s9FR&42$N)^dqeW~*wNFM7n6zw zf^vukB`O&p2HdZ6wdiqfQKm1CC{fn04hjnko{a8==>sKZA)D0T@YE|q$iCpt?CD}^ zhhxKGf((1!Za;fNtz*4-=+L#ttIzDG zMQK9fK8DH3vx_k>rbRIH-C6YqPLTs7mOg>H{pEe2c z8-Ybb0FY1bhyq@FwO0e`7r_Rouuvo{XPGW#+t1DRe!S3(8EUxR4;ZgFxfrLlI8{+PLM%JB8fs z+0q}y!{pxXsqp*C%IL8ua3-W27`4laz!WvWc#NFvKUpZY6?@98N@_9b#Ls=b$Pt@H zIa+={s`{th<4&XUw|8g4ChcTc1a@3YIwX^Vy;$op35Eq3TonO!l|6{>-t}5l;KwxL zt30)!YHeRd2jxKzOkS z6+!9|Y?inxC8Z5ZF(^rT7($YlAa_$2rZEPCnR>V82Qyl70$N1#ce}UNu|;wu>c~Yp zT=D36o-PPVW55FN9o)MNR#MYYC|-P^36eucx7#P93D0?Ou%BX|I-HnQ`8VmL-ur7Ak*GI`DCEqZz# z(YDY)ilo&y30FAd03q$HEbf5Thlg-&+HfK0;0$*qmz_zj%=0OHX%10ARYsTU0)jK0 zqOzl!>Ng5H{Uwy*QE-G9h7h02oUKEvWk}0?;``Om=a(*m#%pI)MmeT)Sz~f^LcA)# z@e3BWn!7P?dAvIjhQ-*x@3DMq#7hOMNfnu*Ta0Vq3_1k_@4D0@yh{WTN;cEhVJJ!C z_K766^|WZ3{D8FG@zfP(Rmnjv2XIHp$}(feBMrc$EK#~VzT>|SVTJFBIBQr_#Oil4 z^z=(n`)$)~S*5Nc)sO*g7q-a+d5=Zq@__iS8?+D-qjSpIQ4 
z1>sjGiF_AnCrqdB29v-%lB#fAn9np}?*cDcVBIu|nf4O{tWqK!u@G$zUGb@fgpAGs zaKTPH+jM|<-30O$LQY)b4s7mW22}4_JUC4LUVC^b2c|0LK5+1G6#}0dVn-?!&#O*1 zpBJc90)xHRrF?VU+ae2<# zrYc4F;cT}dA`!BNWG?5P+3u%9y(6@vW7r~kIGF9_va$*z4&%)#cMuCQL)~i$?!Na% z;8Repy8bU~7np=4L7VOy~!l~0j`A4hny2oQu+3xvEoMe z55m|Akm%Z5-Az)l*bda4!6Nw!dmwNUxVg$gZ$74u&0|TRE-t9Tk*o-FisPnOOpm@M zD0o}#bR;k*f20hG)VhN0sRvJ{WTed@4?5 zEAI%n_=)`&r32y*Jm0pOeT7&{!E@T4W=D2{%v8;v3>DF|pK`Ohyq$=CdhvJAirfiS zPM+yMTVE7*9l)gdAi2O1nSski2@nMDT)j7rkhMBWk5PxH@7(5zo=#VW%%|sikr=aN z>+Ct(pP2g?{%n{mT+X$S&iU2tCXzg^D$mLk8ys@N!c;ggl(rjZN1Y526NC3BUz<=} zO0c?!&7?ALhkLwiz8r&E-n7sx{EXTg)*}h(O>lzJ9*S;E(N+d(0K$coO|){PdaXem z!!_miNto~CbF&RbsG&Z;O_XBnX60*P6Ds4&uU{9I50_PIFb$LLQU!Zm2UKaSeOE%kZmU^Is2f;8Ffv$0grO$P-~3`Pg@8`lB0X&ywa zg%{Xvwzy!nt?NMzg1OzzR|2L{3fPkZVH)l;+(Zz9U{}}&%wl_Q6e4)ocqSevbY-_r zzE60J@y`sfu=8jRr;BCUxkyqRD9Oa7U08LNmE#3^hKkyNb(C&oPyC@~RUksq<-f-Tmj}fhLzEE(p-SG(>cTeEU`*ped?<4F1_up)7OMfR?z?%)2c!fGz6wn zO+&hImFd?vLFy$@xyOyr9JZR5ABw8|znJ*~DmisQvy+B&>bF~NY8bWsjO~v$0H+UST-I9@(m#EAey!I?H zV(?5jC{q1RMcvznr_nNisC={Ud68m9NUScD_C8%PlLPQlo%b8Amme9Wn|sJw3(IrE zOR)iB5C@#?h|6`v9r zTm`{{p0H(o%GvHRfjiFUq7b2S`UV%!=-zJfOOnMVuQcYa31VHDNVpKZFHp`ExsQ>x zPzM{N=rItj4-JWF>YMPf8E|FcH8O0+b-b+g+4qTO-lOk!8&WOws0%_yV8M(gA$OA~TF@SFw64Mf*&#ytzm z%H9S8ZU#@cfnFg!l8Pv4Th-|KB;TA)px`w>TWgp`(*BJ=Rl0y;^qnP+g=MnTg7r2$ z_T0SqLVDI^3F=tngTW?of?>^YT~ul6*QX=wA5)PD8?ekFn?xc=Rcq&zP{Gq3Csu% ziv5)c;edxo2cGWp+;u#HOW&QLyrBbgHQw^^#l;`rZyKAxBHMYPhwNsZ?#rUQchAyr z>%NH-x>7dua1U(4;;FQb`9!Qw++cd$c2exoag(>#^{{hbcB&8pdK6$$&^RPYrxA9G zNyItAW;PsiLyW4#ORL5CfpmTMp@&ksnWHc+wd%di+m)fBvpozrMFmQ%fSy-U^jcD8 znw!P7QAAzKr}NO=jvpPYkCckwNEn%=fSs3asBsUtacJQdT6RO!(cqB3q&z;>xp@y? 
zNc1|PZ*Ub)157NN;7}1?8CwwXK}=?zWpYj)m{u?l_B09&2x!ZHOlh9Y%&(SBhfDErkR$b}=9nGoIQf=7MN2GgvRCo!7TNGI0!mckct?5P7%EeEW?w>m(2~UKH z(xQb~@3NOE;>!7@=!}F0W#PSbP6NQVZ{Rd!b_~l{2bdj(;5e9o#i-O$1rh0r`b3OD z@WF{g82Kl9DKY7$$4Q_f={1)eM_OJZn4u_zx8UoGJNpqZ2N90^H&v&6yS@vBSS7|; zc@>>y8AaSKZ33VGY|lL7Y{>`&gqekeDsf}dvcAS~!RZGm(!;k-&hal#$d*E+zM0fg z;cDisj!@@%4QiG=V!P~SBt-Kb{O^Vy9(i7Mhz0>Uwj1sxEW`1`J*y3~iKO?Z0KQP1 zeAm)_5br%FJlv2wGHU~o*cd!SiQ@NJj3bX8*)Iic;`mRwCO%E*kQkN+fucsX$Rn8e z_UF~+zCd?^N$e-L7=aD}&S_}iQG{EJ&z5`q_fr_jC4GB=a5rEp+* z+O<|WA@#JiRbOu76w{1n=oF||1eoWw9Xr=q%V09AHwJ>76Kq*0-#(21$y8rO10NS; z-T+NXy)U+LJTD@hq|@lrHwu+=~EU9e5Kx@u3mWfo(_PKZoB4ja6PZ!EwSYpOht>etrGYOjh$o) z3wV^z&Mm?9NMEtrwianSEuFjTPmF|ipqhx+X1R64+m2Buc=6r3+Vumw7)|eUeAM7y zGVKTrBP2*fkv-yh>}Eq4_PxtOn)BAyaLVAWb4sBDB7I27UV36EnYwz!piG*{m5Pc1 zJ_jqkTRe#IRr>0ef*7TG!m*@rN#dNn!AX1OvUQh9ql_KO>*M6pxnaeg@|T;&aqO=U z@^)rg?UGzo`-plhbYyT^emI^Z2Utk!Cupl)or$6Pos7f+IDc)OcmSC7T32j|^D2pp z1-Xjqfx8panZ`>j;V4vF%m_fN03(h<&IAcfd^TsO9c-*e%wSAr~5_Pq)(P^r*aV@=8m^M%Rf^{b)`O_!xW5!!&y3ba7;$OS6z)PEB22T6IX0DJ zf^yH^PD~V)xzcTNk;Yhz_*(_s(j{u%#qmX|R@b(I3F%0u3>IU+KI;yY8W^ugtPk0> zXU0;7OJ(TyT%$di3r=%Bes{Wj5yO{@8CVK}fU3?DX*6tU@XvIOg&6p97_--bw6+GZ z(cBWND#K3gc-&D}+Rs~SSHKj~f%lZg&(_m7Dxl+%p~VS}?d=OE$!BNH9`S{A(92Gp z@z>Khf?_kd-Vh&T3>+R#Daec_i3=jSAWb%<4p260a{x<0>P=t0wuZgX=HmqQU}I)C z-&md%)J+3L*?2bwHvq(%Sd><_jE54QB9VAnpXi;6((T+0kTc-9&4H-`>)m&&dQHcC z;xva3*EkMAoG*DUTy{^>LB_xhsS7|C5(y;k!*9LlROktYD_VsF++|eSmpE*5)WO~s z>v$lvwGbLNSdC@$!LFPp2h>_z|e;D9C~0s+(zlFv$tG1nkhrx;wBa%sqs@ zO87Hkb1u-OGS(#0k?$>e>HrtLC%86|SM0E5q>51>9kvp1Xz$1IwkqY7?)i;Vas1NT zILZ|Y+bVW^DYpcZA@>1~Y3I)Q zI6?wI%9%Cei07ovWe;0x7R#*TDHejk1HMbUiR!kOezN4V_P&{7FgJm7VDJYw&&%~w zm2jqd8!1P5#D`qV2d3K`lsr^k)6!LRajI6N#Ztdexw%K^Y4CqAqz zoZ1mEgokX-vq=e2rSa;~*Ql&p9R(htQyK)OG^DGWgf*uWVK|4&mO>}GzCS1%3OfWm zIM>L#6xCz}LhWjTfI%d}N&0V56KS{7Z} zHQDu<*u1pet9d)ZAhl&e3ryv%cnD}X0n~vSe2FUFU7sxcHx#%yjMcNdJJ;930sb`O zC%(0F?NHSB*Tavi^+*ef>2LS!K>Dg&-GG)CF%{BK&U)^Pt9qX-;`4*O@ 
z)H}?sMAY~aDh41YlaE2M#S0;xhFNMt?c3&5Ag*{I&LYdSSSIkLNa0-kpEN(;^zX`Q zqr>YdeqTlKzL{^|zE2nAsOVgfpdQpLH=e<}6Zw8!1P4w~{9y1$S8Ls~2od%@HU&;* zfpWN2kMBXy-RQ$DC1j@!5a817-bvwYSB+9f&7EnyWD>SMP~@cS7zba3@vyJbce!O7z39z_ux9kJ~nOPwa{AZN9_Ay?@o zTdh?h5`9*c6+|Gh?RY{${fifY67uT`#psruQKxhp4s|+Dx6gMTEp~+Rk{(xa!C2g9 za|r4uJnPJM9zQ_)JKDe(XEaDks_hxoz*LxUTMqc6Z%~sw1}6pM9#-Xyw=w8aL7UF8 zC44z=7$F%Sc?7I&R@s??MBW78h(%L%jt2#AeIAJ-E1TKXGwc<+|I(FC5WcT4rN_%Kf6$?SL6g?s(P`-5A*%iJ$0+xjGtqt*u-66hFW3GYMXzP`# z>JzY89z`lr0_1K~-8LT5r@c1mK7TM?t?q;6k#;3~bq)}KZ$eTV?&SNmoNggi3NBlT z=;JdmV5`KDa1Xhx!EKKSZcTFq-5%AUSc8X*Vhm1Tq8DZ+5;YK{f9iZ%MPuF0uw|&f z+4@?rMtSK}n^{qI55Gpr^j;YjjY+pLm4m8X+&>ms;fsF`^cugpAzH-Fs_-g7&xH)m(fiM)A6hnxIckZut_0da1 zYuy`KK7{7r)j13K+HiomD%eYe1gX>B-p6ga*7xCr6OB!NiE7w3W!moS@r#9O9lnwI z96URd@9m0zef4AlkB-$S0H#)FC2-o zq()4*@y_|gL$IhEivcEbBAbP7L)@F|JLC;`Vc5`=*)S$J(?w|^&D3q#2yk9NQ3)Xd zBnJbWvofefKMxqzeUv+t4@CTFcU$eJ6hoRo)Oy1U)x#N!GS19fRC;$Nb2?@325V(Q zIB0QZ%P}w17FQbM$_LV~dCzjQS_t{P48c65?y|VVoaC20rV~JbI*(}BUZc{TiO1(D zRkseLrcK3L-Ax87;!09STO{6yL$wU{{w!`BXjY=vWKliCb)@*&T68^A$0urV5!kA# zbfu>SYJtJ(mlnjfCj3L}m94<;BF>r6T*>@atoCL8Vi_2G^nvEz+~4eaw99E^Fb) z$8}GMeZ@yq^f(K}n?+kN(JlD*!4Pku+XWV$s{_1S;z3FU-!Y+h&n!H?2?wc_@G z&1=v{T1ZwbOw{5<@p)T}2JP@%ZCmxK{6#8j5Cnoz}tX z9pdDPIp%N4>gmHp*tS*R46eaDKNY-&1dR^}z3Gs9D8j;fg2bgrG_v+$T|5eD4)ynV zi;M4*3ZCT+_~$gzYqsyt=t*MnG{4Si-VG8&FPIU%$_Pml%?h`dbqy4-fe_) zK^k9cC3HPftVkXr4%#F{saiXg#$Xf|19hj|xJiQC%}Tgp$c17PC5?WZ*)^OTIb2MZ zNc3czCHk#M_y+Oey6x@$QLjPV#1&{N)V+FN`c*nUj=JH3=p#gbb?J#x#30bmId)@XMcd3HZbOAJoW3<@d+r!Q znPf-ECN2&@%P+d936IKzXaOuCqr-H%T|L}Eh~E#o3S|A%)v`2v!r2_j-wE!ra3Wwi z))R3-47l?!LtLfO0JvT+g`HC>_#q3mNE}jvLm7L#iQ&}uDqy$A+?^ZF70(~LCytxG zZ6Q_4sD5`v7+tTZvXG{|=267H4Z=sUJ}rpv0Xqk(?Dv53jx8Kikyh<(OmDbFcAUF9 zXFK`b0T5IY#Y-VY$v+uXR#RyrPUl+mf(qZKTyF@tJ@_}3LA3%q?QlaNC@18}x(^L?EC+%#92rokp}soa2Y9A-k44{3n>HA?Lf%RG65ex=~t*g=?b zo-(!3DBy+@q>C~f)tT%x?`V2x$^~OuoZim{AVmK&zeX=Z3`V7}?0E55B`Oa!_wNGh z_ZnUL0Z(vM9xjHwo%Jv8d$j+DRNJ|hlg@_AXK!Q*k+g4ho*;PvTv#R9UadE-J%Sa0 
zY7^TjoLgkkf?-1OOa@T*!(a-viqqbm3P^8f&dATPn2EQXSlIUU)28^b3(X@)SIQ4^ zIGHs%P^uQib-g6vRXWxIZgv>V&_d4Lxh>U+Nf@wd0RS!o^&ZJv0-y-;ORgQ(RhYrFl2xx6R0u9Ou!}j`kX^?bxFG5~~FpeKLf_;j~ zIGL&5pJ1BW+A;JS;qrN@%H#^DxT_ysibkqoyGU$hogK(h72jH)z0X2Sa_gV;a$h*Pog{T6>(g-?aI5d5)n{_HWG{Z0imVS;gMD*cZ8Cd zwrWSfS-Kk~S?Z0+m>lXR1^BgUurNW}1Vws&{RhzaB6-ch*wkr;#HBQLcYOW za^R>>Lp;-MN5nZ#8)01ATbcfP=Y555DOE?3&px~G5T%ZmaiuJZdkN`jtJ>aNl$%cU zpih9{h@!cDz%Y(8*k*J%-4jVzIms+%cHo(h-yuvCCGz))oW8g)4E2XF43!z;zA%d? zk{NmJ^Y5@?BGs*<*KLS2_8Bt6(T;ke@r1*RJDR5u$rO*rr2%ZkGSrRi>e3R4H*RH? zyyc^6s%1=y6ztGvT2^b=smc;;4NHYg1w-ZFz;^BG^S=(|^BpQ2I%{0{z64n&!bA>h z2cMuwoE>U>4|8F{0M@f-SS|Txlb1Y|d=)8yY`S+T+4n=j>@UZUUXC^&2js-Ok zaL~CY4Wf{_e@TXfo+S7C7()58hJ-(gw5|nWJvk!*OCkb=M8XP**dJ*Rn{}< z^29*ireF`i!`%q98h#H}i<;!hfiQ~EUiMssqy*{k71J3jCC<)yb%2ecrJf74+qm~! zdAB}Lc-gNEP6tr}_$Rs=J;%xP$h$QqbBjT(bUn%9Kx}qOY|lX}1KYD!@<&^$k8X|U zm~q~D4Mq(@h5c-=VKNuoR>hf9Zp$_BC9F2>N=pJl73$`#Zwp3nHqw37)VOSz2;#qO z`89-EXT!UP^VC$1YLxPO0*|k3JuxJ|TxN1%c=);Prz@Ba%~u5YD@O$|HJ&b}i$iuZ zlv2JP$fRsYZr+7*_UxTcAH@)fmOZ4&@aJ4x$?h_9IzDBaiO5w`?oe)`;M}IvqE?87 zdJ|g6v%KV`C4>4(_$bM;n#Eh;@`=eFWw&u#m7zVwST0YA0^Wjfu|bO-%Z|}Dijxm( zbwM1qEh)-Hzbc=C&h9p_Z{wipJf^}O+u}p+ue24nr|Pzno6`;hmO^&qtZtbH6m9Q< zPsfr~rV#7#q-r~gj2+jTa`&vqxw(VyRxtL;LrAea`^}E2*-6k7b&W_omx3tZ;&I@S z@j%I4vd9S_%zdhAtW^*lq0PNB-bb`^0mA&T&iQ1%4w z!-i+pwX5-7aDF|M-Fr-)jXT&Np~r>zv4hR?r1FEWf-aSi{UWi;w@S)hU#G8JY-Gw{ zPD(FCKuNsH&?M*iF>Z1N;FN_T&)izb`U|@XjzTT+&f7wIYmZxR|PNXpF`cXAhk8nKB%@E z5f?VM4qiq(1`?Kw=n)bIo}*QA>u@K*

SkVDcfbq=jG>qBv%m`2wbG(3U!>?pk@OzJ5_sUa8%NDmUY{RN=m>v76=U?2XJQ$%lrAo!Xc)o z?g8Eq0T{00++T?=xgwk>NC5~z FqX55pI}!i@ diff --git a/integration/mainnet/debug_getModifiedAccountsByNumber/test_06.tar b/integration/mainnet/debug_getModifiedAccountsByNumber/test_06.tar index e049b2aac34f936acb98740536dd1ba8ba8c2df2..bcba25ec538603f92bf5612dfa038269b70c86f1 100644 GIT binary patch literal 35328 zcmeI*N$(}sksWZY^(hKmHySw)UfBk`w7sx*4Y`qHw`_@&NeZ^n(0BLmJW8T&l;L5~ z3j->3ldP(LzRbKs#EElG-1p=2!;gR8Er0u;fB5d(U;H!u>ik-!=@Np{;v6}<~M%q zeVadNf0LK<>!+9V@BdGJ{oOCWeEBE;|K-a!KUxak{EI*S&hLMH``tJ9bN>Dx{{A1H zAOG!l_pjeyKli`7uJ3;T?T;LiZe*fF^`TpfpZ=WBJ`}pzri)a7b`iI|r_w5h4^H1-Wcl77KJAVAffB(7l5B}=!{^^^4 znSXiGAAbLvAHVt4-+cMzKYjk-Km42j_|32W@T)(*$(JvG_~U>5^zT3X^olO~ufP9a z4O{=`A;`x1{&$zp{AT#u?&mS^)c^S1|J?}xe9`ZY?~mVpp8L zBR{>u&ky|7u>3U9|K}(E;}99GKi~YD?!T|ye68=^-}gIj+x?#FF}&mJH}lm$g~s_yRQ8ju6LZ) zcip>Tord?hru*IR^VsL%z1Oo`buQz%&eQx%_d2fqd@k3pUF$du&vb6*)*WlN-a~uU zIQp4m_89XmEWO*7E5|M&>YDfddgtMur@p;vo|b;Ox~-qDv0L727^Z$b_hs0I>zIyX zd8hUE-tATX=<4p>&&<J zhJcdWq#hZ8{c`}r+b*!@j2K2IQ;&+y6g0^(>~nq&wKf^ zHqK#wpZOm8ckA}?9frL>r{iAT^mw|dpQp^@Sw>?Ry~s2ihiSW}rJIN0eYe@?Ovkv) z@4kyG^BkwGfA)KRc6VHl`JMVQ%j?}Aug_Zh?LD`Byse94 zJn~ZSwRGEYT<>H0+R@mS@tLP#JMY1gSdV9#hHkvqdo{TCzO2`3!>miW>b(19>F;CT zp7XxP^X|6p?apU<7Ek+Z*RyZ)Rz{;6o^_p;dD)*~vNcBg#N}Jpox{7%@9AByXDqYa z59eUz-rZ?^Te5C<817-Q7FQp09B1!i%4qcaIKDRE>~k&KZ5t=!uy)4AGQQ{G+P2M} zYv1dhm$Tc3a~sd;y~fvI&n}#nZCH+H*{|u@mu@e+avzQ2dym_8y!~=aLd-c{!!kX# zz`MTB(hr9*Y43Gj*Rj|Ni$|Rd-#+)_vLDOj5zWBs)s|ry%T-s`Z~dCxvAx4~bkC9Z zoGzhcf6ne5rfWO}f%aZoFAR;RF}pm+G2hFypUWb^y!~v-CLv%NpsO!-Qlb!iG`Z|VU-m+Tv z)3A!v+qj7{LxM$b9_Hb+@`qzt%j#VOm2T|Lic8q-~Q|#_mnFZH=ixs{eHLp zXz%4OpT|^roqk!L?z)#=fD>T$!J6r8BJpc4Ykryx1mH2+#fKcRKFjb}FA~u9T>Bw^ zy@%b@-fMlwa~bAAVzZUUZaR#>@^^FE)`&E|ezZOWruXG+*4s^cCz+Q?9KF84A?{Y5N+;`(PiG$XLw0k^)$#6c(sP+jhhJ6tc1+Q+q6KE}yc^U89x>ifM za69{}#HjwUk1e6o*xT>-?kDBxW8II=&L5uLbdG(w*L0n|XtBu6lICQ{#2dl>G?a#P zU*$k^Qr6+pjpMj@-DB$PP*HBWKIrL6?sNBjdDeR?PiT$p?|aM-{I!}lfNGrwf$5aL 
zwsTe=&f|WSi0$=1zs~LuyYFG!g{t*E@5fH-PNDxcg*CE?Nu#VSM6K5)M4u)Jo?2zt z=a{>Dv;r0zp&j{**1X%Xw>>DKOI$NR`V6m}ao$%a&`02`3cIveYzTMnEpOt=H!_-M(Gb9DMNxTy5TGssejM+`scH8GJNTNIk zC}R_cEcx5iOzTnKOZc1m%O`G$9zth=#dSlz9uC5~)~WBhvNqh@G#UK&fNqz4oBC-O z?Ce3-6m)w*Q8+BwVYSl-0sH87MZ@vj(3>FATUq_MgHvy}$VdeP+^PTw1|kx|Mi08Y zE?igRf6j{}Vd+9U1*P1_)vHrx%i?(wxaHTy|LyxU`=DM59ed%pon^aU*c<0w$ao<^ zMT;4KM;T(7;mvz{9*eC!p&#FkIjo!D0)>fHlb!u;(p|uc)x9lOnJ-J`I1cYUNLP}o z&wb`=5=sw-oV~rf0Rqt0~jv`P+9_7_YS7nm;Sg!qxN3U47k6L z<9-i^lzB~-@hNb^T{64(IsvM;jNym5`TS$?kpS!Ym|`(QJu8UxQ1tyM7211^`}#`B zvPcGcUBG5_i|F1>fUpgg-E|#hFAK>E6Vu(+lp@`88vLoe*J>ju80Nz=1Xk=SMccEFhh;U%t!h{~y5^Ll zAD?6KQ?K#+Y)esF<}xhvV4HLbbTwzeHb*~=>$dObcKI(E%f9#W<2Z}u(E1_|9uR-~ zUZ+e0Ju3F#M7wyGR0cU25g@91-+mSkYp%R}kQi$!cSYFP3m%=Nf^mQ|fC>+*xUF^N zt!y*DDd_^lY*9tx+rexWm1s4qLU!d~`-BpSqII!`>?ylTip5sQJ-71J$R5Y!6YmlM z2KhoRhv+t;`5d}klD>@5wJExx6vd9ZvFtCaeYD(wPzV7ZG%Fs)_q^l@i_*I)cb;=A zYaXT0L3xw4Ds=7>U?P_ZsgPoi^OHJ@TyGv?&!wl}C506%#ydW_%j_ z+|IJuv{_!2?u3tm@78X|7u+e5QowXT(q@^oSLFdfCn$cM)IS#ayKILkw(oHk;+ueV z&Sj_Azp4>n&kvY231QyV|BuwC=J+Je`RVq;&Z*(g1zg=lk2e*c#H379@Os1LI>8MO z+}F5)O3pZnTD=Zbn1W}tKJEp8u{;5Xf)8IKzdkO4_5=t}{&iE+1U)Eq#KcJj zSL@1tRYDEtGT(x_YP5SHI251h22kf+Rk3Z)UY^h*RUvmWqe^W*Va0x*pyfv~w*wBU zxc9ax>c|f^(4-#4NzY*mU#HUYM&h(rd6H*_VY+R$e#cMLQ1gi#4|Zck z%3S>`qqSGb8ftPiVydQn>_&9zJ0sQt>d|So4kC9cSHbc(j3F-LYeoh33kIpU=f!g> zg4OiUNX0g2JeWw-UeF(QvEfqU0Aq-R>)tF8pcqDPpU^Ud6V}t_y@3tUbLx$`tT4!I zx0-$pyINxJhCl%fd~~q+v4_@MQ4^jKJ3LPHgV>m!TUG9EcfX>=+9wo}PGQK5s{4_@U?Omd;9Ic= z!1b)sGx=rpt|?0%tS<ZhVgh;6d)EyJjHreQv$N)_o&ylCf3{UaIm{m|P)H|T*$0lUUive;E) z0N5lslr=v~%P1_Y<0~MVEt~C7%${z!tTr~X&PSI0bzhQe4ybC z_ox!7GUZ-)15p!gEf2iN;d|)ZgV=M4!BJSyo)AL4D#haqBw*Awq?=M$B{dlZnP2P? 
z$-Q-1y+2n{fBLUw6!f1p|5P#$kAJ>P!J-|QG8|n8y3Z)<{XOkS8`CJRcoa)!36oaV zyxJck8e|pJgZkvdEFpYUDAL1p3*G`ttpfh^T$1QM8gA93qCr^{X2>(Z8p@EqJfZRg z%%*9~C<4`?0tAbG$X|~oGQ$Hzf$1%ISuR047&n1b5VcYy=V#mT%%c$zg9eHAEo%dJ zJor>JlE;~MWY%rlz;S@`Ol?yS<1o51^5@Z8azFh-v*WTYlzBst+T)VmY&}~VWtLx< z6JjPL!5Q6-q}yQ*w9h%i`QoVuycIj`+OU09`*SBW)`huO}5K`+m4NRRfuIY zfCF;eO&8v1Qp+uT$c&f{#Vn>sfn~kHE$w>=fmnv?iAx1_pgf;37E~TvO(_qG(moYU z?N$B^jz+qQ=Rg-)Ao2ul)ZF_MgK3A6#@nmthHwf>6+?fMu}S52=+n_vH&Uk69R7PP zu~$Yl=AH1cZnwfijy7SW&Zxbpmu^QC+Ntfm!uXFgQSkvcj$X3G;BK_a-HVF0kh&7% z*tcpaHuT3 zZTo~Olj-Vcf22n&?`TcQV#+V@M66!938Kr^bts$!X~?s{j4+Kr#899h46H>3Fe7Ly zD+1$%?8Q9vu--gkhZU!fOz5_xF`z0Mzjpk)%#TBAfN`%QBh z`6r~NcP58Kk7K=Akgz@dKq!QG`W4kO9Cb8=R`@RD;wW=VT?_))KSqfVKb%|26kEI< zL+OwhpHaWnpW|vZZ!-wnW9P#aE3g$tOW9x7po$iCkp#QkZ)Pap&uvJ7$v{_|(sY(6 zI#~qKX$fUW3qYF+fCoJ#PSvEPMFbJ<%6pNJ)aJrJ{6v&Xok0*J+uQl<2tbwPEp)f< z1vz#XIs`jwAW+Z>s3kIqC*FYUmjHdSnAEFY6Dp0HEi^N)o2>!#K}qCI1Tl+D@2%Ub za3#awflfjvqGdbIK`_Rhp>PCc;196GQ?*z1!99}&GARUS6N>1N?v!PsHB13XMei?H z`H=-00jKcI&5$9$QFM;QlC)y`G zl|O0#u^GZv;$Q;3*n_*KeLl~+pkT4gW#nUvQOg-)EEcr7E7{y}Ae?QB+<#qoVnl9x zFALLpvK7rNMql~*iBHcCkl|r%ty9by%G#i!SQBC9ctmWIS0p9+Af<7GuVM8t%U*67jvOz<4X0?^`HZ%IXb*l%HgILOa+8_=OXprCUh}a9Q-$N&8+% z=u?mvaiA1<$*b&4Na1(~fz=j*o4?9>^0q0K{!=232sR)aB?uW|9(&v`ql5fZ)`qG~ zsRth0X6^%aW+Xs(^*W7E75uIsY?-g^lLweJZp4`_x{1&{L{tYW11z!;WbDcxDnyvV z#7Z1J^p6yhS!cAvct~qNY+PhAK|2~d$x2v4eNR2V9n_067X%X!Knh`nAk3juuX<2{ zG)5J#%!S=Ia26Df7(8F(XZws4(_SUEC_Dk>E{kv@{izCV)#LuGO$h?2vsCSdS-|6f zW{U(DvXTg?D&(ZHqEzkDnxL|lC$y&&w$j`yWp2or9m!~u7zqL~I{iiXGA*@Fh>4Qo zVIk%!bDOk@N+LVPiUcIV=@{L+jG?px9a?f?42+<9P=wjP3U_$mJAo$)T{~Nr$v^OpshIE)yE{4%uV@%m-?m&k^2%gk-P1gt$!LkF);~I%a2M-iPq&Esue}Q0a!9_& zCBRxGMxvm~7XA0lZIy4CDsn$_oK4 zV!t}^f)Ot|WZX37vRxZgm#A_yW?J#17WaR}XRoRs#b zSp#EbHFjKu@8T&HfbCV(!iV<^hRh01>gP2Qc_hIMDpU|3>Q!RYLKJ091*bXJ#9i47 zUyNBHzsMPcF6ylm(B3PA1lj48?r=4!w@IrJSm^y16wGpUE{L0{ zMbV<(<9-GmGa5Bl{>%|+SV0@4oPqE!9J>n4!i_nY&8mGue$<}4DRA=Sp~7V>j2Yn@ zQ$Yaz6c+52B?3eX2@g90)sRL?HipwxU9BcJ^0a$3`(7$3$}(UULWCH_<>Otz9fi^$ 
zJ10@d0HnLJQ{qm*Hb5Qd0(=_2gM&6ES9*YKwnIlCQCU&19TXKyIgT`6luw|dkc!Ku zrI-mjFDB!5G=^DrUjq2sSPBWo*Jg)~|RZ58t+rS)aa6S|lxc2tHMo#Pg#B(=!o z*ejHnP_BZN#@c&j#Njhs7prp+U*v2&mh_J_hEQ<#*DwqWD- zR()B$bfHw%@?|HbJnZPeijELmE=dX^vSG1%WvAfRiH($0CJH8)2ngy8j3>HY4Wpc= zbyzy>dojN9CwjX|I5fE_Ggi5W*c78F&p5iZAHT70jHlvGWJhWi9x|f15!Y^ z=-9qjXA_h&4eiNy%>-Cm_=UpAW@Kk=z%h+W)w@sXR{89f5^ z7}tUU7WoFF>4gr2l;Vkw0(>!#V1`UJdUhd+mvw07z4e5TMw!phHJUSs1`);P)TR6~ zBvlKbr*V>M4vIt<2hoR5f=wyk{NwmGc>JRd~B(>@e01i4Na<!3D6zE| z)S3wGib%I~`L3-%ZA!L2=qni8qpC#W{U$iJ?}Zju&l5DfGGxcDD}GQC!7F=TOl2;5 z*1DHHvk6sVV3>uygWX@yvEJ_}H-IAE(qbuVLvj*SVk1ziVG@N2ND+XbcZ&2McM)4b z8I4UXm?$MK=lx<3ha#o@&i0Jpn<9XJqJ6IuRfdztJ@HOyk3f~_?C>#C`ucWQAfmJ3 zEmv{DikG~a0csOLlyA$t!aU#*=3U@|N6XqUpJAR9TnKG@owK!YhVrm9Xv#=uyRiJ)n*D1?L>T(8@j4IJ)J zRxg3KQ?OfBIBZ=ySTg@p8|m*j6|YHy!3vVR6&OwYh|`(NS0zy5MOz}$@y4OFh#DUXOaa$8Z-m4!tlng^b^Ks z@PyX%^X+77K^R%K;*u0sF}A1PQRgTg+pFw$H5(jnFpXl5mds!fs<_#h#^F*V)myL) zBqw~qCH4>pish@MMkngg;2{7)E@=!;*WODlNo&N=5>>rc8Y=8ElomPBES~T_#?!`OgH<|JB=1|q@3oJMtkc0|aOaI#v7Y@pK> zPm0Xu3KxoUCLY$KKU(yNJEd=}QxI{?)Irn$s00oaDIAO+k1!EIm#+qmuR7GQRh;=o zUk{Z`rIJB_zc7(SgHs)&UKp(s*}|8o70~yF0Q5yi*_~=Vd^_lY@sGeYl$>HW+)v9vDq(A2$;yWf| zWGN4rE<#U~Gh?z!0vO%8D>XgK0t#8NT1OPQSiD8c&|j=)Hdiez?5$;gVNen%No)M@ zfQ=hl@eHRs`w*@{L1MbrvX_I^AN5cS<~)*K5>JZ_6Qp%r2d#5u>PBpLimPI^OADu- z7K|FWg6{xK;$##+mD}!CvR*t8)Gi4%?1C%~((u?BB-r&+4I?VFdA9d@RO13b@`)rq z2X>kxt;yOeGzAG9p>wN-sGgHlqu)U87CK7+Xb#dcbFKh5I!%Ck3bs*=h=C~zK_Xrt zACIsP0Ok(h(=CzhY~ER_E#P8>{}44tdhPS1~7OPWu)faKfPoKFg4Xs<_p2iEklqu_>{1L zf~e(ysoi4YunI)nm;(!rVvZ9dDCR6I((*@uk+zJW9BDxzWN9)r&SPxAH0;EuYn~5D z(*)K==uc{aL^`Vp-ZYbd&6USdiuk~GZE)7;9QOWEtW~cP zESDglVrPL9pG6!=roAc@a?H^%Crk}f4KA>Kg4p5`NWl-4m23|D6K%+?v__S?Vo~K~ z5W-qwa8#p88k1sAMjC3~%-dKE@ zftY0%L`Ven+9wRiCDEhl!yVvwp)ZvYe_51pO~w>Fk)!M{ws4pq;iaC!ey0Y-&avnL zQhIJdtf0}#E!&4sXRxG$EVMjv2#T4s5`av{pMj+{TO{ohn*Y@KWJa25x_Rsj+N)*E z8$}{;)~WO4p7$bs@Yv|sLT?nlFbDOOs%RkVj*G*N_!-d z+6fR;T~?jyOJQdc6&N*aJp>?c*#*?t0`yI&R3mp!ld8@%KVpa5dFi2$jBh>;Rg1oE 
zL}!Dxr)Xnv9X|NqF{ziw>th=Wk5*{{YI|`8NYfja38uRvs>Y&8pZ~U(6M_Q zw|>KKlUCt&meZ0M!a*KkUzI3Fg9i(9o`%(nof^8_2MR7xh zJe6hY_Nt5kS{H|@Z6Dc-Wg|45RRN6mhO(DY$?1aAKm=VXsU4U?{i4umb8sKv7Hsnl?Y;12L}|nJX|!g*bdCUF8Kx95 zNN0?WQ83FG%4-CDx7smuezBNpHnXK7HZWP47!Oz-CaEYjVVEEFxI2*pkO0)-Xq8fL zBo(Dqc`r3QTP-RJN>GRq)>Bv^4LRHeavkg1{Xv8Fz1WhhLP=3Eq@M!yLNTY}T2$&I zPBTwIcr5IJkU(8y6%?!C6zy~Tyc!dd(haz5mK)w>3~}9Il7t8Z8~g}A;jp!!ijqu( zIU>l$yh}V3)-hthW`sZwCnt3x0jkEkApuD#!dP|AP+*h>>hY7)mfstc@|#U;UvdgW zOxX!u$tftc-$A2aSPDR$%5L6bN?O38s>ll`$mdk?R{}Xv-+-bum#!0D;6p z?GiBYQ0^tZt3u=6G;=((Vwp9ytJY1kBOXA@*dnbq69Tc1n<%2@sy=2(|Ulk|^r-Doa<}oXKP;bB;iulp+m8;?Z)n z2%!`Yq+n{V3fp3zAQ@QiHR@9Ntre?2jRKkkg%KH|dL4<#_HWLPNr+(0kB+S#Ll4U? zj2LNz0{7Z`DPLDcE!X6cVjT=;MbFOh9oiZk@1mft_)tr~L?n|_s~nwWL?vl^QHiZi zS9&XnNh$^R3Sv1TBB%TbM#SP&qi7dL`LK;ebNg)0x+}T(bSUAK4Z8iDe(xXvLCPuy zZ0ylIaQj}$5Rn}3tnrEth`vtMkkVOQ7TXzFQ0rgWm7GUGSbWs(g_~7;ly{o`Zc*#6Lp(2Gh>e6(j3(~^C z7-_37YeN|0yx>v5a$C5vdAdlA0uMMnODkUBRAog}k=Q6TmSzAPkCW44CPar*LsU47 z=Kfw*YoAcOR3SrNG8Sf8WQy3V0)SqqxnO`l7FpV>NLz7uBDhu4heo!j7tRIyIK=>2 zTokZhT_5T@KDUt2U=~5t?|`=v5JP)nof(D$zsrhpxDhTs$C%+U7(?PF1RZc2EYive zu*1-wHHuP+g!Sk_pL0u$GpLrZ#iGWhO2`n1tHT5s&+sb4v<@&zsdR)~?)@BK!);DU z(1BjNxg%yc%+7;|R1Q{9Kl6KwpZH|a7@aNc+SlzEIwvlH3uL9<34bCT0)V$7N>*?Z zPZ$xrpk>A;TM1`qxJ6A4B(}(SX>}Ltjn&b>z=w5e@5T0`Wx$H?fbb(B3;sw81R`23 z;Kgzb%UrIC%%~RP7NeF?84)L7fii?kUHbPwljaTEds*7R@QSNV&zD(MO92mQ+f|y! 
zJ8iR=mi8*i7&j(FH0zivfv1LV`8E}09KWelmAC?15WrUm4j`X8;SaFb@)AC$~F z5Q;UqeJ|@%XPIim;S8uJd>n|WRRw4q!(1{z)9WgGfcdf|m?m&D6tiq7V=W*kkT{oV zTWPGVJqS8v=sn2{b5$`HXnN5Y+-V1C%1|eE#->O7>C5x-dJl;|W z8*_k3xa)iIX(>{K9POAa!?BbC`2Ij;r8z>k6ThAhXz!)cpxpU%g+Bm^SL0}rVxSc> z=3qVihCp5_`tayc^dpd@8!^@y*iyypSQs@Xvk6CL^;P0F9DBinN>ruoVa*iDLMfaU zGA~_%uM--|Xvq5NI}khFxiYc;<0&9U$Mz+YiHJV3@*8PrG(_41APibqJTLyIG_*X( zbL`xH5&*L~Vh>a0!sFcQh4t34s=7B80W`CHvA-Cg&GrMd*`;Akw*^oWfzy z!bjB~px<5^2a>5GLGW}-uh^=f3 z;Q8Qmj`phF>Iq}f%VBD)lBqn1m*7^4Z~UL4ApU-ywe|@SaWWjQP5x;(t}950uAzxW zYH+knoQq#$?3|N^?nAGUI#jQS6M;8Un396uM3 z^Ah;rb^?EUl`b(wuc&RTnbFd;EF)M+z*O2nWej+(GD{{lW*O&a*?x(A@!qLeq|F?P zYX95gUtw$oNeEab-WcdGy_6KCi5R0YbU1`|K9o^)9}!IXP#mzqgJ4%@sq(hUCyM_o zZFOm}2lU(b(qB)h;l$2Cz8wA0hLF7)PPl>Tdzcl4ZAE$!b=ikihZqp@4!$QwDQPlC zMB$7$3!M*8Y6rm(J-`;htDudZr<+`XV&X9)1v*N_c_{?(vgR!SXeJCIDOzHUL}k&F zWL4Ly5TC@hR2{7%wXh5$hue9Sb~;62azYd*9rUcZlZ&ACAmjid43>T^0xpb}QyCJe z*}?2E1iF~Yf>;X%gp~G-{d@|70!3B2F7GvvDYWRq?_LTK8umcR zMl7KMIC?f{-q8~UIix94Gn}p&@s^Pzv=oR>>0!&lK(f{eV%1}&c|z@~4-gBZv`3M# z_zKkaOWNYOXklA>y1ruY!^a-55eS1!$mj5SShD1|F{v$$T9jq#v1i=`C zlfHQzn<_5GrL-7T*3nXxxcyID(_UfGr7azHjvrGx;)P@>`bu|Tb^kc@VHsx~Map)R zi}SiPPyupKQQD{0@?KIh(;N=;vmX_}v2A0)v45ydepcxrEj8^JLL_idkRUaH`9?)g zKPNBA@~lAC40MWpY_H1R$qF`8P$>W*l!YRL41oc0TN#iR?Q&JxsN`(O7U$8oFDZy-8?m2*{wUGT zst7&B%UEl@4+E$Q5QjMEEkVKBBFW0n>8#92It37bOkUQIU_@0DP7FD?X30) zpOS+kCzRRky(nt>AaW+V5;zV-@c;qKnWj8IN=e#QW?!!oPU4-EnyEd~w>D`>E?`4y z_b1WfIYHZdS)z&))rh=p`5iilazMVNrwV2eJR?SYrKnnYByZ;ekh^m>N;(qL5hhaQ zkimQrK#sC0BOhl5}&HLqfaOVSVU}W z6CjN11v3XFZsdF)ERXU)!9+h`iBS2?sKY;Y>$@ zL7XN4A}>U_7ax>F8OWu%`2!_s-@dNqqEs57H15Z}TVhmnqSz<1J@`fxq>46@C2A^Y z)HX}HuH6J$sp6P*LM9Gt;*v-p%zS}| z9`~#c9^bN4q*2B$`PX%d--ILOaX35r4M>l&Q(skO#3EOgoDhlPi~tpa!X`y#Gj(cN z54}elDNiW9V%V)jEp!=nPAA{cVTs|{rA$D=Y_nOl?*&T8;R%@zO9)*PQq`9Hw3+KC zGXJp=m3tw8i78NkLK4KIwpqzYT%H>e0yupqrGTuRMjXCWfW!~w3~=v zCFbD9%G+QLosT9~q~?v4`_l=RjFKKx1_*oy$XIfU42mpNlJllB=jtnCqWtNoN{W=| z&r*~w#db8pXV5W%u@#nNwU8Y6FS3KmG^X~C@2*@GPTk(cP$=nH?C^;ZX0WJ{en1s= 
zVcbGl8yw|!y>AUje;WdUT!!!VzyJrcDn}>h0Y$xi!oNTc|GWPzYaTZcy*f?e1Q}_} zy8|UWU4($No5dJIC2#NL$5M$?vz%kZ*vvpDEaH05zwG;g#c|}7*vr;{AyhMCZ=BoD zfT28+z!AndvO`J%)o5eTA+srd_!4pQ>taq?;o zR$%(6l$McUF_3CvD1 zL-ZI8dMf1QoRXZFNg#1vOZ#2~3e)^)PE9K~)lg|;Di8`ef7a$9Kyq=EtB7Nf*?Anv z6fsJm8q}8jEkw64dnQwtVtZ9IN?ZZ5J%dfxX70DIW8;)V0S_m6>kuw`88G>*Bw{1{ zZ|Bo*oyV2aNaWgRPFM%0hqU*K(gV*6&y3s9vX!61%!LjHLd}9X1T9}I(OxB~kqRld zG&zlk!H{g_09lSBh4A$9l2(s$F9=Ak7GBIOf4W{3i^5{YCnogk1Tj23*X_MN9H_%I z%vTmp&NJh)5x0db+28h**dU$us%ZHiYZvCmvSs;18K}i_{(*jB#{db-Fn4M*h39Z*LULiR8jMHx}D=$%AntIQnx3O?vTZSNH} zg!<{aoFHLMGTRdXw0z|ZHWg(OFPW>oN?;WyZD9<4!by8$x^sC0tqj?Y3sa$q!cqQK^&ZP`a&%U-8@u$3(l2?Jn_ya%kAVWAgh-1jPWGohH*tAd8 z8aDVHdl#GIt7gsL90C9rVWG#bh!Dt$1&~erGOSLdmY{&H zAP0g5D^9vLWkqD07D1;hdsS5iFM$}YHYeb;@0Hphhq!&bOW}uM6(z6Gh`!N}kQl1+ zEKi7I`{_(t1MS>72cohEA)~|sIFC@3tqyFr_p$`&0PLR}<=A0<7zBH)pVlqn#qQIi zDg(4v#n~VQnN6~qGayynpT<4-+|aUuYBGd*-4iN< z8s9P+QQ+iBJ7aRDFoM*%P69=8pH?GQ6{G;VzhojZyrUOXQaRa%=@_gL^*?=cz%s0C zy(4kTicEcwOWX{p6OX=wAH0q@;uI-$ zn*&MZQLR~8AHEtx#_pWJLVHe%&}boaqUY5E<^ZH}AOr!Sj3E+`Q8@tu?SlZw0jA`u z91|6mD26VdlQN&KfI73zU6!MM8Ggd4!TP19Nt6NZm-H=M=~AmBYsx#L$m-C z0ma^9$!xZeh z0*EjA-3=rni<8DAl+oayB0n2B*+iC%%Bh6LHp)vdLC&7x39r?Saz^6*K){^D;nWz1 z1?FoGU=7a>7O7x8OFM=XpBOh$#e^c5&u|DCG&JZz--v1W2#O`LA*%2#0q0B-dLs2n zh&5zIfgrsHaT8IE%2ikhe3_0&$|_*YejreTl3LKxXyKeXt6qp}pU|NpW|jWz#1d~W z-3wM&L`)?49{j?mqa@3G<#O{Se5n()eBfT1p@Q{ znwi$PY3(X?fl!!}&=GwVIe2x3MiZJ-*~lPHC2%wx&I8nufe1l#&#%6b2fgOB&lInS ziTLxfDY1E=PL7d)T2RzqdD^XNr=;uujJTu0l!hZ38aQPrvUP&)J_vbKQ77U%2}cO4jhNp1L+*&L|b$xTiQpo_Y#HB z)rwDlFF{Ml{zP2Q7h0mW05Oe<+gCcQfxs^dgkqefI#F#1ummQZH>gyIk6Y|3&yobn znnzkl1APqJDh_1rPbUoX^=VrbMhY@d&7`$LsuXiEV7}86G%?^N4p#9Q3B{F>mW42! 
z3SKr^JiYTNoT&wN^Az&LIO~DH#o<~;g9iZ0%3-JKEci$Hs~rgLj(JY3S8(JMv67c% zZ|CQ;Cs4czgCQs5SzuNs#^{d0HzgI8Oyv1xRr0-I*Ys(p;bY|{9uiwi0}x*zQLV%% z@yi(dxO^t4s412DopWx%R>U1;R8M_Huok%YPseGj{H&Pvj8`KO+gEsHS(?5*7iA>R zr1wIY#U{znaoXYopyC#oi$_adQN)2tgA3SMP;t zu|Fud0z`<8m{bt57+{gmya`b#v;pQasT6aJgPX*!g!Btm z23Ab@dKM*~h)HJ>SFwL)I#FL4z+c#2g&4Cu&^keU`JxE0D>3DS8j?v6wk6N zo#Ms|9sJ}4a^#opTlPp*ly#r4VRl;|QJygEMBIk9W-@_V3XSckDsklF`i08V{8QCH z!1L1wA7oJ4dkY=o`EOmF70pE0-Rt-fQy}sNF1fseq&7@>r;N%zaotEyOhEYZ{JT#Z~ zUN|T=g2)>Va#UvdtoIEG3D zNcxPeFY_-BlGYRhu8uj#T0|SiepK1HRIU2?_|y2;)bT){LDRTlX#!oWCbF#ZpdCXD zJQ9`XAXqeKq-cB&c4Cl3F*!v|PafW(cm!5CW%=vQaL z1G&Ht?NQH*f|A`hr4a-1CvfXh)&&~(PWyz~A&fCrCjc$vor=&gg-RR0gIHiJfus8L zg3=%jq?4~C!~4o(zBx^J(~zeF52t6P+NkATY7-87{`Gi(Z4gavxJG;+nL`fq-NWoL zPu-PpD$}-Qr!vOQ<@?7SK%Ikh(e^pDR=>(uYM`YXSuQ3~-Zi;ZNe@oK){IObMZD@J zpzIVhustkBdr0|9+GJLsRNx>1%}8M361tKdK13)G<%z5}{G2lo9R-dTz)v^}T#=#< zuNhWt1t~=#j<+s0D*GA%r9L6`v*3~tEbX#WByOAxn_-Va2z&?Br0;#FU!;YZ|5b&gM$tOP(#kN9SE$izg?R;eKU l=9gbSzkc=k_aFT855N4?FaG-LuOsl+5%}u}{6CDq{{-(_1c?9u literal 13573 zcmV+gHTudzT4*^jL0KkKS!OH=?*I#te}(VW0DxEp|MGA{R0{w1{%QaLAOHvmU_aj- z?*UFcWh=2-sUWQ}Qiw!?w!=#>NDzurMF@>$ZEaK~N~IfS*)24v*fV2huo_f@O-&ZT zNl05|5UQ(e&63ulRjh5cX-Y#ro0TG3%Sy$uZ6I2;wMvvUTR>GJwq&x+vZzh7S`|e? z*;%$9YzcET3uL!@;wqou2w_T+qX=YVG7T_+plE2wBB}lW38_kyXvk;)0Ay$Y03xU8 zr|N0yF-#$#0000000JtX;0dNBRR9em13&-(00F6`H1bS_hDM%KAOHXW05ecYB4`4h zQ$`IYfN7xh05kw;qfLH(?e~GvIjUg@xZJJ6JM>Fh>I5dHLU|78Kaa0Boj{LAV^DTBqW24J%eG+4K7A7Y>q9v3U&5NFxqOt zgwXhILX_*|wGyP3)C%8$vjo9dm?Rw4C|56fC>tnd6_3bzn3JlM%tw}n>11Qi3Z-XT1J0{kFrXv8inNALs*^E2&6#M*W_lS? 
zVs&{Ls2oKp+ET!=xrm}RL+0axGg;ySP^#44XXgi!j%%q{;#k3_0#`fViS;Lm@n}+VJ8q+k%-f-l|Div5$0)qRFRxO+=N; zL~w<#mb$B=Drw5dQ8G(I>d@uPGX)_8ID7UW_)DN;`qV@VDkES(SU=c)zF60}`aw`$ zpBT&4`t6Y4pZ50`!^+~tTFoCBMWkt~=x>jFXnh@uov)WGL!(h03Sdj|WPSATkREw6 z!=k)WU_{}Q)mN%0NApOMwt{C5sZ9@s>hnU4`XP_gIvZb+3 ztyz!s*(Koyim73SpF<}XZ<{HfZ{>_Nl2qmyvU*TcAoVMlN+sSsC+k6WB!EwPIhY>JG-;by>ZZ^P$& z7aOaC9NB_c7F+^72)5gkIo@Bo915{Amk2k~SJfgMdI8F+)RJQa*k_&8r@vgA9q`VS z2{r*09uv@(uDbw-y)q4$)y*!`b_Q86OxW?2vyG3(DVXTu4o<9TUx|XAjN7gDzam8A z#rqpynAX?mK3T6YCp(VDE8X-x6>kCQqKvvnSlw)HN(1QPIt{JarDck`os{<;c#gu@ zlQrVA=Sg_+`J_Nmwz_8p2>RS8~pj@|?1T#Rcts_WdCL3aW8uEHS;at*%)VpjQ_@ z=TXscfy2XPK5x&%&+An8PphUq34z%5&z6jrx$G*9^oXSo#x0npPtKKMs$muCB@@t% zd(~lQ1 zo~P6gkAM%4fDYdNXN~0H-^W!AJi)f1w!u4|ke|Ga!lAV+W|F72=h@$ZXh}n-w`*MY`hN4Es*{2i4TJ`wrt-TSV5M6&vTa%OPW-I}U zru@-dP2BKU54p+;{vdx3e{^p&`@vK&a67i){z~8LP295rX_J?5K%|h@e`N09ic2m5 zGkyFEc=fs1aqg-zVY(XT43kcflvD`2td#S@zD=EPYV+GW4v#m3Bx0HL_XWj^4l)y^ zL9|JS_&-{k53tnvTig!*EhqG(@H0ug$wN=@{K(Nh@5w7@nA3dQ@KTZY?XL{zLJx2c z4jnj?&s*h~IIGGVXbvz)slp2HGUHJxyU#CaOz;?vILjg|^AtltT;v|MphWiX&m*13 zk&aLmrq7B%K_-_vD=0mhb@$2CIoi{Op`B z=X?Bd^Z@o^P}<= z49Vm+8G4u5W|1hMK6%%vthVCwi7V%_+iM9D)#LUs2||=wxlaw-Qat_a%aC9kTrjUr zeMfME^!@NHd4j>EeMSbuThm7dvIBWeydRCSdLKwE=^zWS;8x=qBEpk?X%~qC+CiYA z^Dg!JM5!tcedvm3*5a^!u=e#0(CO%nOuVAo25Hk3ZKQ~YbKA_M<#j7H+Ner1MtG*aB5l^@FTSHc|5h8%l>{&lC|>P9J-wS83ZI_+J?af7emnM6^RyV7 zX%vZ>&}a^ijvo$G>g5U>tG0Y@OXfE>tQaz}%n5vy*-Y^&QY3>?{T{5sEFzr( zQyun!4erTEOgFdKMZF>sk0#aa#mU^_QniU(vHDGA#+1VkR$aptTFx)r_lIm>EqlXBK-G3@& z(dZr=^ti7C)aA_F*ES=IDYe?40(KMy0b87P>-M7wc>|eKK(U{f6N1K9zQB#j@V785 z)?0$=DRFI`PB44Y`{RlBqNN$DqeKRlttB`f*>#Iw#xL_ND{X0FaD1N}Jz-Fba=3z3 z?j}hm3S3{;&a|a}8x3X>HMg5U%K5(I`OfubwJazyDW>pJp7p6gaiuPw;5*%zEKPM_ zZ-Ga?+-6XiG7AQQC56Q;OX0bTs5w5QI3^Is7;LzM>G_ue&4wC`=rGjE_r9<@gh)e| z8rpP`A9U#bRBJ}gd5m>7o!`abg~LXw>pQdmFT)q z(+bI8?#W-IrR}98dv46}CVPCDzGkn2w#2W)^W=7l_$qMA!wP?3o)B(3^!UdPw-$Un z`f$N7LD?{dm%>=I#nxBfVRBQeVoG>+l>ClV)LVIwggCOeFdO|B_r-jW^>Mvr2ud@* 
zG;>NHg8B0j{)R+&l&LClqM0|Et@pDBhIvb%Z3CHdGix0*jN$tKVp*&{_!ZotvtC$c1GO9 zwTtt*1$mn`&4bT^G3P2mRS#B8uJ|pEica*>U)pbu*TB->4r3$6BudL}Us(!kz)AWv zW$hzQvlCo4`w%Wv-i?6NoOs9Krc|1Z^T$+t!}2Cw*@|pEVSHAD0!WORldKp3F@i`R!E|78%W2uc8@sJ=}MTrMtd!11aI#fF!0W$TKXrt zemIX7Hrg>@_}4uPjj$NbWoy7--nKa4^o0WM)zl+OJQ_uL79p*IgN1Ak5AVHMe8mV> z_0LtHrzd*+3T)RVppf-}_bt~ZHw~}ZW8`PeK;3SEKyRnAoPlcJA7I5jLqz?l1hA39 z3;5Z+MsxCo-7n!1))T#wM-w+Od9@EnkBQE3_rB)Z zHQJ_paD9v&x-z_E8?<2V@W-Auekz?`ha_^VU9G2F$&y!<-doA=><(9PaO7aKNy6Hn zl3zB9fOO+aR6?#EgClo5g{Mq@U!BZ`n|2^py%VcJsTH+O?ao|7|k_TxS2TMWX~%6Exy{To^=yF)ZpTQIjV`i zhaNQ~KN+XQz>>ayTX&I2*!43@itf3xJf$1f24%gYM$DofS?J0z736{(pKGnh;7Ibq z{Q4>j8tJv^Y1D~(nox5US%@LK3OlFNXQP_vQw4KlqIKUVQ5cI=WfR68#$-2rvEcTPrW^?P+XX2z+Y6D49~V>b zu)fvS64>53JAmU(IyI4T4)Z|$oaC^bC9{+ZPF5;ccLmM^UZr18Iewi7|{HnL~EwE3a&ICa?np$?0dzX z3}B~ITaR6@tl*3oYKz&vM?Hh(;!M9?u1*eT!5fA24kJN}@7E=uF@khGz%QP!Ux9PG zMsG$eR~;!~))n%PqYz|MWlqeWmP;EWH<-qc2IyolGt@p)t2J;6pJV+;tb(&QyaH}2fG9})M=T`CF=p*#m- z%~>K1Kwm*0nt|zv^+1Bx(v_TtqUJmV0V$6j5^SNMv@=^Gg zmal7muEQ6vk)w1@TPI-A$n;(X<|njbvN^oLe)a@m;Dak%2&m#ZkI9TvLFDp7&6BWv zSb7u33>W69#E(KS!t-$T1w?FY9=`iQ1D**2+Y3Y=3l7c~Yf;sJ?~_P1RHdYHd`0l( zkiCM$o`PyOBbS^3VwGOt9t+xtD3Ya;m%dnRys_GRRH80FH(=T5xRZVJPIc=Sj+=c< zuQ})ft?$0Dkxqe5&{(7y^|GcC8$YP9X4nUp91nMWz`%DsTR@LJ9BDH%&_qIV9>cvF zlyF|MrTK-Q+}Mr;Y1%5Eb9KPpt*qOgZKa2fJxrJvKH@06Vkj|hVvtw4b|j?f3dYsr z-q6`RsG|k4=u!c(5GuIy!sVn)WYY$*ZVFP2xOLZzD5l_8^CJL+gsa_UOg*vl=j57X z%#OD18dh7nm);IWMOzKhAH8PE;nt+?&vjciY`vP-VH3O)tLuTI-jj;1oU6)`Wxyv<4-Ov{d&{8LnI1XDr?FnMkoF_dz8e?3>WAPht&y@` zDs6W2YPuPou8&})*2(N2)4tCV(bd1o62AhZEz`cEH8)qiv$gEiH_;z1nRp0`CBFGz86UI1m z5K2z_!(N$_Aex!!C`4SQ%D#taUA*h)x^5e5TX;(duKB;E29!IJAhFROBw=|P@ zwe(?jLIHOH=Q`W)Ww1K+u5)kK^VPM2dd;(Bg}6u;?3Xv)^7}X)VbRkv-n~~>Wq#NE zcIl>nM0t)f%>-ROghnV&PbF7e&-n*u5`TarXM^nj~skgHC<`A8R zdziMb0)B!B@HgCMK+7kG!}SIjZ;rndH3*AxM65$N8x2lN*mvhG%IlF=ZjWINIzFnI z2O99A}xARm|C537eqqT|^_*{&;w6 zWoEpx?NwAFbgcT~OpM~@o!C_!zH(h5^&OS{v8CaL`vkZx8VA0wb2QOaRL(H+5_qz} 
zbgu>Ak10Us&S+m7z;~^ro!$<{67D=_g6kf0qKi0TUui-YoN3oJgQ<6Z>E%rCyGYa4 zu2{+fe@*><$s%GBC{RLME$ zD-lsR)gK!8qPyaTVP`^P1R2{Vnb#Utr9~Aw$V`D*_8pL?g2rxNnwQ8!IvBqgb!3}& ztG?(AWpwGhB%AC!j4;zv7J9|1x$?kJDz80bA~u|*^an;sX@7rM@^|S^yoN1yyBRvH z4bG=q=~QG-kQ1<&rC6%rlyYDoNynk_y&@MV9!kRLP1K#(^t@~fQ(40PnPqxEuz84w zw~Ytc<`O;AQ3Ij&0KvFCqT+k%Hg(`!#3P~+i)<~d_k$b;n0FJf?qqx64sI5i?jN(g zVj&WJ&t~pc%X2Xf+Z&rts;GxN=b1jPP86gL`uYz6#(JBwW6Hx;Np`B_mt|jD9#uqp zzCRIb^sNu}mRk+#|EN^pCOzLei15oiRU;ijR1{GO{re zRY|Q(Tv+@P$?{z1WUULw=er&f8q~KwhxJ84@RNM{iqBSAu>e?x2bWYZASKV~0Autk z4@UiI70!pBK`JJVOjkF@HBYuFN8-M)-v>4XuD!$G42cqAWt}PY-_?F_pN9GJQf`d7KQV)Ma(g3E=Q^b`NW+J*qyCc=HvNM}&mU4vl;> zCD`JVDgqwx^e)O~nh~2nHTuob1!P`F@qDWfnAUc`1c%5n-dToFwjrN8YERtioS!D- zjC7PjzgJ4bxv56|LbPpm5NL)4u)ONpe2B9A|A_cz{`rmX=t8o2x-&^6?8H3Ng>enlG5c?OaX z2AcU3-njSOLF4VN(HJ0!`fX_kt!z{?Mrym;C2K)W$pCds0!N>cHaBTt8Ya z?$mnSH-p#@OK1)7WPMoUUlgX(T**yN+9!A@+H2N=G5N!~z(NHIg`?#gp^zFT?`ZY1 zy&flgb$*txQt^1M3sMlHIe>t~Q8-L(b!058Rz|R~TKaww$a^8+@KC3*!3Y|-SgaLk z5xBXu2@j!zXxEq;C45%#p32{ZF`>&L$jt2wfD=|Sm7L^2u#Y!-8fy#&?< z%J5EIQ?5^sV?lx?p0#$)yl)qa=4f@8N#}Vsy#(xL=ZjN=yW(pF4)`^GcF1>)$~@+;;YdJ`WEVJ?!<%t8bdJ7}6Tan9sbhnyzY> zNcK^!#-hP{!Yps}vUeeLcggmESW4;sx*?(&t>^UZ34BF8qw%X1vByURs|_6uV}HPK1x?Z$~Je$&hbs4&&z_Hk1b_~V0_*To6mc7+* z*ehDp!`kc+oaCJ_E6pvkK>@K2(cH-vanz|#+F6h2r9tN9KVS} z!-B5}S8GSH8AaOgPJ<|v$y7*CiCdFPIHPVlB|za;9hOBOgLoaiS1)|WpK;_n9c_7W zybEKGvnF1g*p9aep%ocqdp+M`igacwlPf*XKTZ3sp3@B;qj(as96Ht#+ly1XY3maO zWD@~yB6tydBm3$hbm?KElbpryH3l3eypv22mg2Y#qi7TttG#Vhp)?QXgTV5%bBp?e zdwRZWquAbX-%#hxQXZxnDJ3%`QwW_cuUs~r73ivQoIRTy(=PPVFB>q2WI&b*mSt%x zVaA@0ZEYJklZ5e;v{bWvVmfz}+dq@r@YMzU{?EG6AnM|9Tbd9_;JrD+{<{m-CH{6lnwHp9kw6c@}bb{WXX8W zZq0$?U4-3*q8N=y&BV3om7-6Mm+6n_69fCL6MJ7<*!TAy@An} zGQnv|WM-)nTV7L>%8UX;ro}LKYiCn!KFEPAr;uVht0Gy~mQ9 z>DZ$y0*%^NZB9L!S7<0OMe^1D2y&MmgllW@mqZSx68E06G~T|hOY)7s5*6dl4(tjZ zGY9oWvVBR_9Q2PUlB>g9-u7^Uk@nB-x5>%9?KBx41q}LnbA+aF?TT)@|!p*kzAEg{kOwaO*hCGT( z!Itt*YmAF*yN`kuFJ++(JQ%`CxXXO55REsvP(hr6qFWF=C>n>vbj%G8xhEB6-c24Ig(O0naJf$QTdG2qi6&x%trQT-ciTG 
zx$#}XHYtg6^m}|UCX~bNTZF`t&1EcsV^5$*dp!4R?7)^T7zaKw3re}v%=OBFf!ldB zvI?!Pi_uMc=}3-tqZAw&u(Gzxo#LWy3#{4BHN{|<9yW|s)AQX)imf1*d9YjEp`15` zQ+nf(2t_{pi~HiB*j5zNy`F0D-=6#3bmoKtQ#IE)w#|u8%_YR>-C2jJ5i7^)&PiHX z?>s&HnN_O>?&xnb*5SeNl@kT61-c_hac0(3P&9zcVxP!GPHv!D%bc-*Crr&`b$>!l zy9#?W)%NqG<5!E^5vyh*>)^E7($Jp`7 z@|jJZrjX7O)f++peWl9l#(iIa4f|R0`{k2zgtvxxf_tDUA_{@?uCxgR`dfq%$dIXo zl_hQ^(wp)MuOLkRPm$A~P_22R1)9U#Cs#0V_crn>Rm@UMjB@vpC1RS#lnkYzhq`ja@RD4v153iqS~;AXzOueW20Cl zN?IGNQPsd@MuC1!Mh9Q4{q>A}*7<^zHrW`*&ef>9P-U9o(zs>}+Q2Ul6AqKDwFGb0 zfi9&AIq#c$+unhi@2|&Yokb!7QI_J(uT6dOr4sOab04?FUDSSQ&u;zY1QJ<6vFMI< z`OJ9~r=oppeW2LdQj=5_UPtO~nY zttqYrrziA{ql2MA6#z`n6Baq*R>3pFSB`a*g_h*QqWg z#tJgVERC47xz_Y6zCcU5Nsv|`bgo6_VLIQ^fY|BL49m?1;Td2-hz&D44pMVs%9vSu zl5^hb8cA|?rCqHbi)CqMR9G?;!a2d0xANfe;B!iRjyb6))iV9I=~E#hmvZm{(x)_nNm zm-oy_UH9jf?-o5TnK;(|Ymi@XpQi}9$&HFTLw|L-U4dq8N(O89H+Y(?{y}ihY}lTw zw?zw{!LH5Dzaz+*DE__W)cZI(TcVdx$J{X#(1)zk%_byqH_Hj#X^SxL4B-o7YoOqI zE97}%3Tz!|aB=rj9Qea>`v<&gJRoOIiAw~uR(V`qvNw7zu{@K`-3xiv*1`eY%SRFL zN@7s)D`a;otqtXzlenrGlECk5c3LPHq4zQg=A143GR2s<-8vJ=*WCe3wmgc$e6rROtvS-RZp&sjQoIoOYrni*< zQG^t{h*{q;nc{iZ9rss`ZvD`zV&MCc_6Hyez5Rryi;`eVYN+QL370OWQ*DFtSh&C% z=`#rnpT|kQD^IrQyDs^MdMkG1d&!wjT2OJrFCdE=cnGN#$?I`)kE#CS_?|5HNE)cz zjIZaS&GyKncupndWV(ykWQzafY~ER>vTz611=yzRs7G;z^)`yR z1j^4`m>0hvbzMclK&++iwVLUfHx9!nRmMK@`^)@5rt@0AZV3LUoFuppS{Yt^e;{IT zI)(Kt?@(Rl$U}?tbVxyKWcsknz>Yhn>FNtyg*py}cgMRL$Lawwdd_M#HOiXN-HSE#=sfrK+H_ z=3%o1;o>!8;>m~!D(3>=rv1F_WmqH)i+jocE}B`l&*t?=7t8QF^W@R!Vf&&hzX|V) zI8%UI-CrktkI+hbUF@;)A$)O0?Xull5aF@tG*ct5B;@G5>V~6I8|)?VC4%IJsLkr{ zi~uSt1BjfkdvFJ|0&kHh#j!o%tY1#sfdsm#ofz>$BGm1XX|?{~)V4yCJ;x8$1P4z7j?CqU$u3n$X|= zyLLR?t)GL=el_R~I3^c9R{4=|aZ2$!A!MG)JnzM1eJk*qC!uCzqGqRhcr`6ctBLHF@(oO<7VH_e}^;P>UiE3eBe~GIG(-^En6jQkqJDbW{|OZ z;q;~7Yq{-itqbAA^xQA+Z99CxS`clyIqL#(yGdJLu?U*GYf-A|H+6d0YYInyD1o{3 zs;7!#YVDuKY&8QAA^*Spgr5Giy)1ix9>Wr~=**KULExdmA0p`KW5(qc>!DBC8&_-u}Syp0pB?1UXD8h;Ul zFU=8Bi^8$-6H7Dgn}y+`ZDy+bIhJ-V1hI@B%O$Tzx*|ZZJYzRa1%W&~evF?d)~ILJ 
z7?W;UXM-uzsLa~ab-C(LsOso-RXy=`nP$me6ZJ|U9Q&nf)2X4PqZEG0xU(oakoT)@ zw{PQvlGn-(2ZGpiYa!KQuF=G10wZU^r$LB!Y5p41z_GY=c;%bPG_dUiv4kUD(ORvGyChO;cGz`(bFs*afY$CM1W@EQECIqu zLKA0O@t?ga4^2q0|1#FX_ujQ=nvWFbcd9%V6l68E>S;;>{4w3_E(hcv!lQlf3fUxt zBl=OJK)lMVsdgoNYYE%mdvZe#A3F@V%rDnnJN?0VanIAUEZg&;lVNc+0) zk3OP^DXYNMxgH`^NWyM#iptkR(JSWs6EniF7oCw2i+(RHR?HMfAG|wV$bWlIi+m+D z!Qs9ZRy3q$Jfc7l)M41IQ&?L>p%D7$w}POwJBR9wh#fSq9g;$QRN}`#CwYUN>iF{^ zZF0aTN=Z2!y>Cg8NiOb>2zo*8R~$Vel1d!Hmq`%d zgQ1Tuyz$^2`BCT_G}3)fxqQzSHzYa8kF>>PLL<*r#)Y8)L0}||`i`-qckC&az!{a8 z&={(38G2=(2xNH(y$9Y{o4?02QAQwD{kdC*@M(L%{~CSpldq?vNoxSCT0iMDhdv5Z z*UCPiOJayW)@L1g`0@_E04VR81VzkYJ_kYhWrp|5VSZBQm(HQy9*=k#RlVeN z7>im*MX=VoYdfg?At>^%aHSNuuH8{DwNP|NReuD}0M%*AHrc0_o!F2B+E5rk^SDtc zXXr#KY{o1)W~tj)q$Hb`1JU)_^t51(tRMEQDnE1)@@ zT+}XyCM@X2JuvOkV9_Ks!GN`+Do4wv~^xY%3lPw$XWA?|wLm~uOq2RYy z6ciM$F}aC1nvYAty7qT>;8AEw54==z2kYnh;0ht=%_|o(%O7J)O=PMA3b3S1qIT+{#NjRLhW{J;D0$w<>hLt()?J zf{|$5QcpfiVI$DSr%_`m;pmfQ#O(Xm6MJ2pfFOcP?-uE*1Q5z&R%$GcRh)05=wMB| zRzBKrKPm!nhto|}N$n49uqExJ2bPy~9JoWg4tl^+4Y|4F0nPK|_tbnBx6XSOu|P-`S96l1T)l2iZm$Uz^3w3y zY0%zG;OfBYCd=Fo@sKqCBQE_e^LV&Cp)mHefmE!V%h5k7qZ6I?xQAGL*P6OhP?nHIR-TSh7TwKgEW0!3cK0}Vyv57^ab`Xer=p zz_BziT%vLF8dq{ka6N5wWYcJ2ljdV;G|>Jd`!B#NN_q@a%t^ytdhA(B?!@rp&JwZ| zVTOT=#2yK(8sh?60-d$gjm`NtwYlK+Ur;Ix0*l+}rC9eT8SEPeoyj4Taw^&|Br>qA z<61T{shX)`oys>gp67Hy)4^}Kz2_&QF{~RECCAr1@niR>h`S3Z=yn5JwLB^hO6IjY zQYswd%L>%c zH8CgK?9qCMciuizq)OUsD5X;Rkvp!HsqEwD2<#FDMO?(XMlX)Cod%@Use?KXf@xxjj^gf?lXTC@U|Rud!mjs`}+!&78#vc^v8Q#5MHuaRu@;RDON2PCaf>%mlS_q^ z2j{7K2PX)m#5S;`ZQaV<G8l>NRseU1q)U0*%uUjeQB zZ!cq_l2UI?UB4Sp&MEfI)753_kLSt-N$rS-&MfOtu-rtx`I-YIl#$`6`+|=&*U>=3 zd=!zT0({)V@2=8#&$Kb&q9RNLm^n`evYs)Mo_AGD2>g7&mwgpDx`Ti>5qb2Gz^~*N zI(^&~sXTs0Bf9{@rKdH@T20qM%Y0_Ra1bVGfL%*k4=#NDQd0&(nn9F-KRU!hfDDril9{0x)pfCzNTk9G;=s^<&$C}rqPu7~ znRwtpge3ADp8mSKYG2k`dsXZE{@wR~vz&kR-@p6j>tFo~{k8b($oW_C_w(34e_z%g zf4waGc`W%?X;162@7uW_zgp7gZ(o*wq33!1*dM<8zI^MEzW?F3zx~ZOuYdLWg+DgN z>v!J2h?n!%k1yx%|4RP)>%aK&zI^$^Pyh7e-~aIAE4u7|{_syNTl&)y 
zL}GpW`|4+Ywf@!e^BQ>Szx(F@ZH0fn==bH@^1IJ-|LM8@xHx~j;g>(H&`*El$5;6I zfq$_qKW_B@{fU2CBCGYMn}43*)WFZXdD=}4os_f>5>*OFTrWpSsyrsdi0-p;kw zyI)6hw^B<#-dCl4^(f18v@%NFue0U*F4wVMN2<$p+)qDxUa$96_x*VK>J+Ey%`dN<3pL+B8pQCQqzSgIYtuI%<+E_jGVQKn)rRl0G?Rnk3 zK=W{R96RzVqwsRTFHtMq1e2>!Y_i~hTPpP>3d0kJR_G{nU z)$6%ly_{=4&w8{ct+nL)EZ4Htt3BJ1`p>!iiH+mgm#g(v63)U)8GLk67t-Key{y-%t4Db=}u}+wa=;vZk}% zdwKTlSa%7smfrS$?)S8Y*Pdn4m3s1cwWZX?z7DT{-1*)%OX8K=98vP(ifCNK z(qTbIJMZ4lO`7COMx5LG36Fa_kEfKmzwJPUNU((p0Sox``)*^K7L)7TC88bkL8I(J@RzbE%2Ug zx2<;Iy3TR8UD}-MS#FtAo&$uiw(ol>7i-@3W4GXP00jXx^2R<MV_Q| z*Lz#8sYdOO{IHvw@?z(X{gGNy``(V*26PL1E2q=F>Yhp}Suo0t^Xl8W?d#p5)~zaI z8*Senb?g0v;~eSOYdPw;N4`rWKoi2bCVQ+LELo1HsXJ^E@ZR^L-lvG)+!Tu^UGNLBHpt)gkefTqW?_hpMnfIcd>p-YdV=rZfv4YpF7CGFLgf zj+$k4-uvEEaRu{zl{VD!sESmn>$xuWC_l$FMje-pUy8&OnNvhPOG?N3G>i6V(Yw6e zOUdhVd%4;UZN95OKck;dofkLR4$1Ue!mQh)-Fm9A%70suOZJetOci^ivmg7l%G*O6 z!vP|`sHMV7l-cK_Rz>~&UW=dr7Ot+?pQE1Jb3B*YuO5!$z7z+KcdyeE%7iEPN3i;YE^-_PEBE5b)PFKkM}jAS}HonTy0MKaUS(f zrTI_O8@Z?!$EtBxUhs){ZDWoPnpfE78@;lnbo#t=-v+^!sT zeQr5bo98&naw(DWrQKk(Lv>x0 zjM3~6!H;w`H9Xu#k>VB&t3c3 z({O$EpSDPTM0p(hDh9?aJm)nG2y!V8PmvpI6A*CMGp+%zTmj;2-+fH`l~m>8iP{MY zjcro{tT|l!z#cZ_IxJ4Qr`3QaZ}qvRH2P(~bTW0ULR`C@h6Bam1tz{@!-Lz-JET>ZT^*NibZVqIL=KFs#5u}di@YBQ1|qNRWDva>N2V& z&`pNG`;=0>-16+UZ`tzH&DSex-es@E6-+I0w%f9l?P-wTtv{^5)l_?rw(0ubr^@=< z%cSk9eor`n_;wImQtPH&))~}FNZHf%8}um`=%riks7+T=dXCyK*Qn#t)l#m2gG#o6 z4bZ}FX8{gLspCZ9pK@7CuCPYgN{1CBcG4kW*vPCykR-iBpPimCL+1PWphRcKxK3q5 z{aBVNv4zdBq`GFAA`0TEW7vHN4@Nuy%ScgakMdZ;Iu5Hj$Hrx|8Yrl@9Gm#hLB8Vk zwx&?8pzpR7`1T!98SUBiJm5GLSkc46=|V#}*|I$e;I`Chmj#5x#0}jJDmu?7(H)Y< z0lnx7dK~KWbk*poXT@F32EDd&JZj+$!I!imuy%5|c1*oeER}nEfS#l6%Rqu2g$j;U z)-(X9Q7C)l>AisJZ68}S;b;pzFn`k!R=?l(TOa(CgZEWi$I5gbaiOe5pUh`k)}$-n z_XXMxd6MV;mAu5NSxNm1%=Rj^+O6WfG|kg~Tu(ZdDVMRfjHL)drMd%0^|^tdu9F{S z1z~QvROdK6+p=!*2PU_j`$3wGcQz6{)rW_B~b+dIL0C zxz*Hlq%vLA^%D5T&h(l$LO=px(Yzo$g7O|BOomNY`7^d(+4L?62vXzrT}y)mz_r~P zKFKpr`&F0KPxWT2CV`MER$av%M1j)OybsmXJl^kxmK3nfE8{t2k=QGcY7BiTT|3t2 
zZgosCkB_W=s)Y9*pi~Rq(nQloZHSKcj3&|F_e$>2L9V;z;4*81K>@N=>w@9*FVGhL zn)=rcy<)k#KQ4<#7W=m(wWQZe#-im(TWnK3JQa-RfQ^N#MLRSGPyt89mVp6PCkeD? z$}cnthhOZ=hpBaBdxM-x)l_PYoxBSiHpO9&fZYSE(QC2vQ~kqnV00m7Ro?F<$MY$R+O_Bqs@yI~(CiD4dOV+6eD1pR`zm$X z+AE(*uXKOt4f-a;OEWuBB1#ZGIwK;|ml{p|IF#R(o>gH~HT3bcEJ%KUB;;V~L;)>A zHb`0ot;j`Sw$vseU{~~}EP4?4_Y;C8BES+HK@YVYSqb8#y!sm=r3E&9ZCZ`z%$~pC zGC!!1x24$YE5H zLk8I7hX{}Uo2AYq>%KdKBIQ4qmo0#LGDAfhz)!1nQdFRdzjriziT zRp~h_H*6AwPXn?Rw8*S@ysx6A!e@d__k}ec%%NYXlPDqTjV{Xp@H}M-D5GN5vQR?& zOmKWWD5ge4AQCASg>;sttc3FM4RR@%O{@?HhYb0VJu5zdMXy@ev>N$Awdx*Jq@8-A zinQ6fgGRqKp_H^IV#(CM%JXRcirS(T1Q=Kd5O-<&qvQqSyH9=P`o$CaQDBW62H{T) zlPVWF>H*2A^kfV_V%GGALbL8s*`)Lts<5sAIf4I+E@LD>hB)&Shv}gv?Z#rHl^1<*jpRKCDn)x&g&D!?&s_ z$~6DuJU0zfWD{wpI8>4f#&jEdZMJ(On=KDkU#W4~EvuzXiJ&4=+(Rx(0?jOX>G}NiStdERuJrcpwn`FRBdc z2Ay9tEBey;eH8&n-GogfB4M{kdo{~{O(7ux>arzV_UT?^Z@^aE8i;gNgD7Zzc34V? zCt9cpT}+PN_kxnesp6{7XEj1C$eOySAPWrRfvP+C-S<^AkVPJC9-`!sh7i&NVDqAi zZ>dA3ViVJQ{X7l)Xa8w>LOf#=J3I1zLjAO!BH!(TP@<)ybmZJ&e*g1$+4PpwkHl)% znchY}kh^lbqGR$#j#6gGzWdZuQXl)NpDZMePz*(e0Srb;aZF?S8N$1Rbx0| zR;Hv8P*f*niR6wIVO5lNaPR$uy4J1W(UG&cXw2CxKb-?NIUsf|8RDzwgy*Rl#>B-E zYv@QOW*O?^FdHJ+jMix-*h9aI=7l4mbsFP*ZH@bT0&35LqW+ z4PyX}v|>|lia636ppH)1EVhX4bU<|TC&YgGJBy=ZO>*tXZ zY9obI6w{-0jSa+-6@Kb_4M{oP;oh%CWY|~mjuz-Y_l;R90Wn@oK>howpdSgwf zCTN5g<Ie}LM+R-oEtQ&G!I@QkdA>GGK#|pc zg)$okC{ITt15$oJ>MuO|elPPUBw&POS*(C!yy+Tow@(E4`A@ zMJT?MC)19cj3NU%}UDlb8( za~d@HhTTHN!oxlBd*y>}m?R}Jh+ixO5VOr)0!tEdHo9XviYC6Vg4?+alIm`zzH>VC z*xd`>Gqqd^wAhq8PCZ2(H$BpC5HFIREkhq@E97&tVx+iM)##q~3-l+6g5nS}VhtHm zAh1HY>L&;#^nhYEtziR{GNw)PF;|O;9BeKDD$2-i8*Cb7`yC`t*dxu!y<#L6mq7F%1ZWl$a~xcQm&HV9u7{@S_es?=WmX-;PCz^Q z4b3wKBM8i8-JdBoMlnHfi_o|X*2Ov;L=}ptHquT?_y9B~H8my<#%ge?B26|tXbuS? 
zgwUlq6o~A3^h!p*zm(c-)Fq(c(_hRi~Mb#tqaLf|fiRqAu zUgK`?PZBfe@Dx#D;-O4Z_Wg9w9~q@fz_gT=uGEN0Ba$(pN)KXKgX5{CQ4_&l6Wkp) zkjiiv79mQusT1{vXACbxl7f6k*gD;}l}07@Kw(J1~ucAZ?Wjpm9g^57r$2 z9P!L~7G8<>8iMWzTLMnGprr%|Ro%#2ht!?)Jw@&lU+=5dPU%y2`D9Qkv#_qNBLj4P zh=C%m4iLGfTvj$?{2we$(wNK_*r66ESON)5tqUog*U*>+x{0a3PKx9j4Ujw%*m4{A zb+weVz#RB~!XOI41BnI7uPQGP$f<@EBF@R7%c|1(be2p(a%+OF%ZGqMX(s2nog;x^>qs zDSafCS;g7`XxS(uwPrOSq)sCn^UzR$bIeUpn?g)yDYE@h7{sFG(Fu*Zv&M^cvFO)s;$=&Qz;15f1(-E zTk(YVy%2ZOC;0`gv~ju$kYntt(P}L0RgcT@+4oiOZX!r%T_0fxYFb#LO^7T@%sAK` zdBSGLf`u~JDJT(wjuECM#8?4?>j*~zCQ8@ld1VZ|Ij>^24j?2>q;QA$Uownx#oKL+ zE<4{(2#)JBSS~tsGDvYB0C~Wvc^}TZ7bMVFVBY1V?SG6l05(Le?cC#Pmwp^gl%7Wz z(G%bI!uib!z*|3@NNiMuK3E@cfin2yMV1=ZO^ArnZk9*DFrraK0V50=YSx6{FeJr{ z%Zg5D*&rn^6|p|HU^YN}KwnkCK%Z~S{la53djjb#;Rz(SC(Cv4VtZ7xThl(`T$pHB za+ycp?`7hvZxP!<3j(JE2P2R{S*XzzLgm!#txQ>IEBL9%IsxV7y0jOVRyV1`freik7Mfls_qY9X$u@V@?>dws4h*6pD<-KgJ#J_6ida@6?ATfvI0OB=u zQ-W>76dN>5ii>0qJY}+Pq|9d-GC5E3Nz5}krSwhR!Ft1@P_n_ONhMQj_;&DGWf#Z9 z))``q)86lO+mP5N+*mv1Eg`hoBEd(z03DoSWL9QbRYR<>O1)xIi1+Bkm< zQw4B4D`tMKDhUx;dkKls+aSfwNYQ*lQZqu~(Nkr^HduKDIv|7P5Q;ESbQYHrS9cTz zQF=|tVJF5BqRqM9Nf2Nm6m`G5VeNUeb#Mk1-=yuR!Ed)?f~9xa)DBs=V8ib z=tP3)UJ(UUKp`a-pD64MQ!!IuLx|~!Lo{nxE8b6-$g@_=fc<1Hu_S^~2{9KDxG`10 zv*4wviURr#fZEM?O|%JDJi7r3P)aMw$*)dDy?Wn^rFu7GjzbvF6^`RWPMZRAE&@hC zISdx&p!Zc9n1Y%UOq~v~btHW3Rc|)2jT1`aGo+o>9+%M7;^9e__zjpsL&Jldk8ndg zAnUXD6j8?J&^X{mjthe&;|5^PeqqBSMmRap$9?L0R!u5v!=}oleKN?R$(YKaLZGk7 zsgY!M zdK&&1Cc*br7k{MfB$ff6G5IA}JWjtvwyKX7QMXZ&r>l@yX)hZ(iwXSGI5`nMU52J0 zUnt)ecZv;;Ef#sA5~Y%Y7b1R`b`ABqgI~1ba1pV>yzk}U2CnIKK|)cA$g>8LYalOe z+3j#n3~tIwl)^9|$~$S_><1BLT4GYw5jM-Kb%YE%y%#Tv^~Quivqus~s3R>}Iu2VI zCc?NYINndFx;v_5qGWCr2e^=^2z{s1IB*}P4)ZYJW7;oSVG zf}tW7>=I+j%71pI^z*-*){uAlV{4@CMGW2qEM=8g97b)r-N!HPFwFbCECJg)OOAIx znBnwg>qcp!JKoxz?Q-sE?nH{DIKl!DtxOp)uT>e57RuZO`ciVpm;3Zyq=*D4vC4wh zW{5Ydfg!0F1VcGtL9eLaQ+{!i8P8I^FYPA!-5kO+vF9u45OF<-M z4?F=cf%TvUngN<_G9}fSgWjtlYx%e6oAe5{HA*%Y(SxPz5M}TNOdZ>sYLxU+V_0e= 
zisDIoZG5Wc32c4pMc7Bk2Yu23u(r(m8gYx2a1H5|KpIR%-9+veq}%C+>8g*3kU0m! z)~u*yj(uVrVLRiTLC`~k8S}lUB@h|993yT0^aNKs%uDJh5>rd#&(PQ3s{zJ@$j;p3 zAXXej@-Q$KN(NST!6ev>H481lXs+(-;z zJG3V%8@8eZ1odN0*YDH-70i>quL9@euxN=t%{fGtjszog&|SGCpl%S{v8L%NZW@&k zT^97wALERpXVo@Y9hn2-Xpy&gUxnSuc&;0W1PQfoQdbedNl7C1+=)y_ap$c3oRMQ( zR&(SawPP1H|9B80P7(uwS_X}a@}@6}+4KnC># z`ZzD7IVT9l>JY~ySd1tYW`*AOeU;x75o{Elo1GnkTmhv7{h~&JUF-`q!ubR!9_duE z5g9Uy?a49QGg;!&2l9%g#CZbhSDf-w_u$w4{`JdXSMAIE*gCP+9B%8{#nfB`3|oycE9#oHfP9V%;@3 zMH`rOwR#b7Bd|58*@4Db5+_OCPiWMDY6P@qg7rtChcKxU!w_jUNd;fF&ArmmYRrW8 z!LeeJB`je)A_Kgkox!G=gr2{a@swq0U#(^I79fNM_ci^@!-I zW*F*8NKZ%>phKGm2(<%u(UkhUU#tpY?$>`|P}W&#MLHnn2fQ)+2*{`@zSmF^#mL6q zvhktjL3h>oSiyif1PPeQ!P@s#bU??`KOSi^BSkAFBAl=no!E9kR3 z9H+*ac%W-z-_UJ8PkdvUO;o?%OD{3b^JxQOBaMwWO7md>4bQp-bZm#RN|1P8MKQw; zjp@aosUO>srANf1Nd<}YLurIWysvU-m|hCh8;Qn08>m{I2>k#zP@QsO*!94b@|l=3Kuvr=JngO+W+J3%{#Dib5(5}ti&#YafTI2Eh;%m^Jq z$)oR(D znHeQHR8btbeuWv#kMZ8fO|Gl3*1T69tuqC5YF1%0`6ExUwisk%O3cFM0_L82(xe1Z z;}}@se!`{_JK$xFQmOX22JUol$|C(jX_wr%t8bXtR+Wn4q6VlwG(-x?$All!%@p&W zvrc|KtBa*$a1vL%65IrI5}rhk21b|}r6_swo?p~C5pB;WNj`igY{|Stm^uJp$~D!f zI944bHB^0x#Hf!ws{o*h^@k)5%$E@GUPJE^svXixEeJeG$<;Bny*PVzInT)q;-BAF z#R0MS_JYY%m{%xJ${>bUV zV#7IuLkn^jo;>ixpXM#IAkm)HEt5>GdCp}&i~L3l@;x1?3s9QnTROXMTx4;W?sT?k zzv5deVhxJMZ-gPqnqL8O0;b>$o_b$P5ZCYb5`sR56P6nQhs;R$i^D#wrMi%zakwDW zg6Uoa4XBm38WA^&4$d9tK}cKJSco$~7J+8suvBE<1n^!AJ*<*w9x!hXr6q8~i2Bf- z49MS4s5&v&NKD>I-OuR4XkQG;A6SRSons6~nz)`vf|vv&la~O8OzErGxrpQqPcU3Z@I`ITN@${PD=<_&f%{V(&x14w+Q?O{fxgr0kl0;y-5pu!blB{h>xUOuRUQ zPU4ynkrB5Shx+3%s&S&Qh(i%Z%9^LyDE#Gv9OK+5_ZyPEr+TUholus;*f!Yt~Y<3Nkah3(P|Qk>9VL4u*eSlNg@aw zYk_zn-uH^L8V@2vE;Hs7s^JiYxQ+LGiU-(-0T_L!)u4KNFw3U(aTChVII9pc3KB)< z!J7f8+25+o_R{M! zPaGZl$X`?RW?@LXW(p;>V9A7GXp)Q{E?Tds+mO=Av>F5|6BTLz$m!UNgOLbL7%{>T z6az(P;{c_p+K52}+`(-+-p$ntIIAM05;h={Kwdurl_!3$Jo%DY_c<17p}`tY=PT4Lh#>FnDX+$8=Q;vOT46#Fvz4y+qMc zq~lnE53ks4r}y$yZDczS5a$6$KOe8G9?6MNgX$t>U_MF^EytB`nyPg)vQF=fJl5%Q3U~=cv?er&C-evdn%3nTxSA! 
zvYf?JNzIS(7IkH+hbFc@mjENOrNl=apddS|?G6#6{OB9n^fJYKcmTs$O27J?rAIW< z$8+Eu15jX^gC-F^-uuN287DkKgLXVJM*BG52hcKPSP^^Tw9q^$4i7Jd!$wBkrAqQ` zQ2t1HLc|o11Psoe(=O9W^eI0!&c26hL@R+gj7b1>kR4>xuje=Vu@=GC zbihUZU82MQk-^_!@cmv6ADx6UFwHb3L}A_!>Y!6|0TvDF1zD7x_f;kg2EytWbRgp6 z9b{OTmt~xf{SXM?{rL%XHXc&BEG$?I{0N+s=*~+N2Udc5qsB7^uv)f-^OrAN=jTl` zb>tOQ!c`44jP=?XoQd6Vo%w;ZXtWfX*0RDDOtF9n@R|^#bkf=NVS>k!c*LdNJ-{N;xleZ(=%*~GjdOS4<-I3V)4EghT%Jlew->} zs-ac^O_W&JmrZ~elN|yvM0#IkYb6&gg*c-LsK2JX1SkV0s8=_m&e_YU&hs!Zcx!Lr zGf;NO8AKzRqQ%#R;0A4stK)qyE6(~BXbaF2AtZ~?<_J9w$Y3Jz1-s-!Lhq}59m1f+ z0S7cdks}sZj_68@(I}UWb49x5?BT@MnNmm+@JxJ!g_ST|M?P~CE|)OrlEi?~_q~J* zeFv3+vmM+PghW&wkINE^24Ll>WBgO5P?7Z^77~;Qrov-U8E?Z_0&#d^=3T2bP0joy z#Rk(b5^|g>j?Fry4+SvoQQ6L$6OyJfJ)ugb>hhj=A+JqHQluReWMhqw9+*np0&+}s z1B8rEdcwxx>|@MxnqB8d-ar=V6UQO^oC$AO;DGXA9+6E7Yb)pt-e3b|-~{hA z3{_o!Rmgbkh(%haXU8sP z#{%?9o`{cPX?$9yVU32sV;sBs z*QcSq?SwwX*d_EWw$mM ze#|gt9ovvc7v;8(ldXy;Vq$U)bA(L1JHW3`(sZWS&>eh0nPW7J92f&)7wjVh0bfu; zyqOR6>8TSr<7ZGt&H#_igiLV^`8*`&eH?@}%GH+T%X^ne24?x;s|N3i&nxbZxv}Y( zR)fjDS+^jxQewJ_uN#n#Pl*|egjE4j@wGc!NNKPmWOFA9rW&QPhQ(XL#ysB(7;LBk z?&Ii)0l(=?d~tB98^-BMC9tp5((_`-MhRu42>YUJ4hD|2P4A^RP~$z5v3{JM=b;xL zzPx5GL!npsMFCj!elK72jSrc|_qND1%;T&?&S05wL=X3=KSr1kFWy(_Av|SK`68(d zUBD=G9E~y?WF;b~a7)bJW-;3k+~6^r7j_?P4|-1)gikUK)PVTDNiyBbmx`j!W6})z z@fnG5_l5OA{#2h6&=76r>{=ubD{R5ka73xdcf`URVjV;d+N!In*37>gX$l>Pzn}Gp z4r089-EYB8rJIMKUGiN_y^_;5*)40u5EW7&(Ew!AODragg7?LD1t#0kohNflB#_aow=Ma2oB_sUbQcyoO-8BjDQQrJ+2PY(|u8$)mwCK|~5Dh54o@0F?w z9VQ+&LM{Tu#^OK{JcThbnrf8NPN#$i)fSZ65fCe48{x_VAblbbDW=TgQib7&%1&6#+39>3C51#tFLGdweBissK(^^EwLw z5=B6U@SDY_+?;?6)0l>RfF!ejo-9ThH!5>n(cr>&co2s`u{1t#814zFdd*AXIw z2-rmSI0_P<)(WjTA~}2_V5)Nr(`s<6m?e?%`SB11(^VBsEMc?`kJ7=EtYjwkUJXY3 zIJUqlq#BZyh`ZD*&I5V~R2cx4j(ku3i=%^88A-Fw*xF{e|M7~lDcL-NGnWfUKj97j z27WMOA@i2_B%FE;hkrUke8Cnph$9Gn`Re^qcE9?=U;O%C$^TWrpC9wjEAZzP`2TDL F{tpebGKK&E literal 9923 zcmV;!COp|fT4*^jL0KkKS#&$sw*Uw^|Ha?b0DxEp|MGA{R0{w1{%Qb#00AHX2nb*< zKNxv8uQ}yf(rj#M*0rN5rkX94s?v;-Rje8|WlK|AEfomMLQ(?OqgZTdQmZnywUxE3 
zl&T6vLifAT<7y2|vZ{?LwWcPtn;BaLHA`Bsl+d=;lTm~at->mPnJ`8alp+8C00000 zBB$U0g(^@zL(~8OGynhqYAKV-dQU^-e!$%d0rJJDTA_Np0{| zMT;0X=*Cmh5ssoFCI|yf2HqVyWq|?+$IAqei7^0>+^GbT6>R_{Bf5}*Ey^r6LIuW` z@6*_J`THX_kC$wzt9gu3{w{F9hi*`JDfpH$-|9v^)-i;68pEA# z5h3q658B(~mMuCQ6T6JuDV(EGw$X8J2s_c>8TJAsfbqyOun?`qjW|8UpzYdlG~8%x zId(>HD;#Sf)GqKs?{}1xzG+_F(jypRFbmo#`G)1CmDABZ6~?8{8W8X=yf<1tJ2zww zM-K%Y3o%yj$LKF(jMSaw1|jcPT1AbZGm+4|b?6?@=5s7$)veacos3h{<*G2MTO?tP zqZS_s5~Np6?cZ8E03=1pR~s$9%W!hKY9)OWO*@!%b%;p|QAt|o&FP=eF0ktb0ImUy zV*@?zScGlL7UW>y2b&O-3RBRr(jsV&ge-a)MHAaaGML)Hv|zZ$i(A_*AYp%1Bw+&e zKQrNPS;OA^a8B*fSH6464}uYFdCKmiT*mAA-9JQe9z5`G%med9VU0)PMVN#)?dzDx6^T3%O8A|Kt;3ee68^K7#%E%@f#e%|VmSfe+4wY^9P;(~I_SC5*; zNtU&4;~thyMIuXBrc=Uzo-L$Q-xH;Eu{Tv+#MIY~r+fuQny9h_P zkR7dHH#zk95lqe{;6wYKb7{{a`xf-MvU!E;=G0GO+~d1?LEt2zR){LbSdJzHz9sgc z$96k7^)pu|oK5cos#Nil;iPYOeO9FGWhG6j^LTg|k)+qtHZ-UU8c!2Ukr4}xPAm05 zdA&23t`X#n!HKo`tXZvdR0eC-m+r@Pvv;tG>MwMR?~h*YFBTXmU^-$k!5ReEfkI4} z4848c8+GRAj$3*{Xt+Yz;xSb$7&ok~A#TOQN}FlfS2ztCYZbVs-owFj0EtAZcXK78 zJ<_+>h$l{^L-YrQfMbk$lA*rJ@+@IeQP!pI96O-q7D~}aDo7}h?D(RGLpiR4K#nyH zicUk2gjic?W4?glZeFN4S0CKS(`+4vD(kiR!Qd##m}A~PGVQk1>I}kxM~O9rJhunbrRECHoj;lp8^jOD2T(eF0E+t zlyI!MDy+i^eU5B84hq1yq1Ov*FN{5gIb0KMCuZ~oSBekT`iJ4c%jrEWmG|xHxhc#iO>bp{zjD zc@OW}XSL%?>)ZwNq-TSgN;Jux zg>K#rXfa^ZYAzI$kWxHc3Z+cY?qDs1u2)kdnkYd!#@Yien}~VG?1EdZ_mcoZ?@FO? 
zU1P)!`U~NOlw@|k7)Gb5D}+0jnlF!cBPC-E=pJ{f!@&lw%dNKpN6ZK+`A!uou4!|v z1HpKxqM+(26-wgKzrXw&TwHed@-7R4TZ|$ zdhtd`_MJ&jLT8NG*U`xFiJ51EmL-X~JQRq9VuMq(xpc454V&VgKWt4&0<~5&`L8Ja zQP~ywE3;A5TxmI}>bUOg92QV2fP+Tp27Io*9v*lptKGUgbgnfTqW6JvyxwoIcg$rV zMQVF#)bve-fe>tah3mw8nd}-9A{Tl?m)W^iXksM3$S{NEZ;s6c?TV7XYPynb-#;C^ z4JPr2X)BkzGE81pyQ?TyhqJ)0bn%?|S>8&JrK4cW>#%)w&fg+{vt z0m;m4P98*{dy|EJWjD#d5hAvYR`W?3+byAKL9GoPF%NHWIQQBXpKyUGgJ)fpgSqUr zN~OS#PP7ygPR7ha2bAT0+pBWruIHyZyk7WbM)MpQyTlpGz?!^X?`AS{2q$v4jqQL| z#U!3Z+LXGL(|REaCeDNen6j%n=HEL;B-RrEAGq%q*&jTejMFqH&#`Sg>Pyo^g7 zNY;Js?fvo#S`UaCCkvLNJ*^I{pK0z1%q&*iM|!r9C5@YPR5fx{E}i*m(WvQqac&h!V;V6oBq;8BN?N(NV>(yal~G>{Y#!};IHSn$ zGo6{`V;`7XroI|JrI2KFYH0;lN_lAH)adgMh7pNexL*S9>VwZyxcw}l+;LtpPZ@BQ1C zN#>^87H2&gPPux5jJi-n4s^qa{a8eFCGFv?jp{cdW6G-<&}!o4|2_T>)^ z&OyY1BzZf525UH!;0XI2W7e_?Lyh2F_oUq&7PBet`Jn9BOwyJ!r$%hFi0|}R@r>&gXFt9|lL41|jCfPP{ycmv->kSFbchr&b2`dtjsv z9GdN&(e{lV?pLId4jmAbIGL&nuuBWo_XBgXHZ_ZhUeoUp9JPF;yPzzqHmTRv@%33w zR#T9aD%E`DknviwdW4( z9(j-`3~!oK)11;Y(mbhktU@8EF_GqQX>4!YG0#Q!mEou31(HPiZyGb;`BGKWqMdwE zpBbtR1hf?m(wWguhf)-kgM*ocL#%p=PVAlMm*WE>v(_EHY)Hm&T$!#~Dc5v5wjH}P z4dV0^fshvPry!YnwbJZF;HLx`Sg8<6X^MCR{MP6`oK7yS6)a14S-CM_sTT>TwTKma z$l>dX&nCkMM!R%+Yp|~XyL>NDFD7z{34wyb0pfMr2PD(9mDNu;SvQafW_oG9c50&0 zGFuGba-~j;QEKk!N%tAW=>3L616K=NNLM2Wl1Gx2mZ&d*jEk#miyG^r-i;PXI*7 zXkv}rz&M!b5J6a_XL*oy__DNDA-SIUAd|X09QvO6Xlb1#Z@IXJV&L%iC*OH9ejo4xqnF0$gbB(+Bqc>?7&t9sDP zXv7M0wL_}}SIN*U3AZjnH2~wktZc6YjE3?y4DNTRru?#C-kdBW4f06yULUT#=vH8c z^+Nn%PD0t0l8g>VF&nz9=<;pKC805?#(b#!j_2L!tXVR91$L%kcfJXKwe;&$zE>yg zJdAP>al4*dTyp0Fz)Kb^j`(T~xG6nBv6b<7_OBOe_~qMP^stuIQOwR?iIH#tI8x3@dJ|Q_1MD}6vz89^s4n}?LRY-wYAl zi1TW4ZJ$tpibR7cAG+Kbt#_RkTa9N~XIQdn@bw#dlLB6XONw1F>a;`U_%DdXddhy< zs0}sgJD})4mfdr`?o_~+c}o zVoP=K%`(4A=V^BHkG^8 zD*o^Z4QYVZcBS(|dz8YdP&-$zH1sif*My!=vo4(LzUIjn0IGn@(!cuS{1$y6X=1-U56#q!0suFrX{^HLCCRVm~_{e?Q=kcpn)9eeWj*{j~gMQUFg@Q zio}7)6RQg?&d6PgTknrZwmR-az^@`Cn4{|CW9E-&5po8S(5CO(rY zD3@Z4%bWEW>T%dPZ7q`uxXiWfoY<%8Y&EyD z+LV51>5;~%@0`QD$6Tjd2cwZd?1F?xKz$RjqwT>qakH 
zF3UKU_BY*Sz_*T(CCUVKCmp4|H$w683u0Z{oCzIldgYH512F}B{e!M)KuL$Ly-my* z$Vyxwx-uBVQ_l`_fF)j-d(2~p-W}f(H9R#V)weMC&syA6Hew(O8a$07A)6swHu$MY zO%|g<=0KxvYmwWWfHnu#*j8~Nc#9C#+|L8*iDB>uc|*;P&xEYCVROw)NGN1Re1*#q z&S{S?TaC4)RY($ABTo@UQ^3CA1=q5dSoe$LWD22>T`(2v95sxT5}Vs$j!$P;GAv6G zn8VN9SXE65LC9`_y!bg{lD*$>=zNfrXPOws=zZIhXmg0Sbl4NOxq@j`;<8(F4c$wv zTV&jf@=>1^4>`ulr?y2NyfR{uwQ+jeYpR^`B=(34kWDpyo#l+vE#=zzpid}am~kiy z5iP51jHZ)Wn*E;RYfgb4mE=JnNQN#4_~9m)*?N(E4euqpHP)ax9s zDfTHJ2CQoC2zafX7B3=qQEKQH8qZU}{6H?xPTl&AY+=kRR%fPu506_AaZrY(mKp_j z-NP3}?YT<`Y)#{%AnTt5uOe?AX4;@c+?8F69(h)N0d3{dHZ0?av7T%>U2 zkPzmX^HZ877~5M`*dxN?iH5xc-p3s(Ajum*q+8tE(U zWe^e6`^ywsI-* zb&g|W%?c{doQ#p*JjK3S-f*DAQp1JgCsZk(TCHg>IQN!G&J)_Nw7ey=!#^4>LNw^K zE4|Czvrg8(F4>x@3*GViMxG739oOkIoh{8H!R6Gl0kx?j4QX)n;X>0(koNvwW)1YZ z=eEqETQ?S^UAf_Q;Kpvd^PPoa9i|i;3a5<5tg9zwX1B+y4U(ex%CJ;dAZ>YduKU*I z$&w?IS4LLUBTuY2PDa(~ZUjkrt9qwqL3lY^+@tC*8_8l?8!<1wmX>upJ-5QCiP=euU%U*wo|&xtE7}IEgUMM=dKo4EAXPX znLQ+aiU6~Q2olp;EalJ4PTrHrG!eA?bp z?2W;4&s%+p%M0h|&2C(!>C^Sf(I>)AO8M0{ey*gcDtckGIAuwPBQP>#Vvs#>k? 
zM|SjOa4IteF_60~{6TSfgCLG7^%-5~j0$6%#T;srXAZKwFw;O?=$cfl;DZdn zFiUf2GDJ54BvLoIj~9L_%2I>?)f_9&BaRQuvB)wYi zMOS)4Fd(VcxG~$@WFDNh%8)CppOq2oRnUo$N<8r?pV) zBghk~z1MKt?~zr#$Og3sdiD%0Ma)__Kt&6O7S*u0Vbcxs;AblPi@BLpe3oJEh0D<$ zVX}d}J#AM9lJz{qyM{ju$j@^ybUEx~7`1dq8j~Y2J-6N-srtgKAn&1m&Vk$XicxtH z9!0sZ>#h+D?sBd49Zcr&65b2auQ6}$3lOI}UA~lI@SO#RZ6jtJfx3CcwaV;V?jN}a z?=7f<^~1O8B_3j7YvEG%CR=)D1L$*;lvSylYvKBj1ZZ}L9Sk#a1eAU0e*$|LSm&k1|g8E1HF{YQtltJE6T?Uh&M^m&r^``Sq6|E$;LQ=`X6DO z0e8J3%XYof*m=c&C%8ddVDm!{s%fFzuT)Lm^>M)Htmer+%2J)A?e;YWuJAgBH62}E zqm6VY&t0CtbPcwa^a#w&=x$t^+u)>J+tCZiKlQ2H?M3l^ws58W&jAf((rSiY;AUg} zM1uN0#6^G}^t@c*#&z%P5&4(?n7-AtFe0D-G!+Qo^>3Y57HF+u6V>CzV{KBReMVp>CKa{Q#jTo4`qBD6;qrP;GMUDkS7Ew{TkOZVq{zgkEa`%;8STr8dO3K>lZwTC)mWjQ`F z=&bRw>oX>J4iK!qVeSE==EfIs-xnf<8Q`5x8-teB$WsEzdF;4cWK(J}cq`c+ZyO^3 z?+b~+%BaOR5akEA7IebcI5Wzsg++}v&Xz24(y4px$2e7+JSeHHh~n3g78NZTqX3qI zIEiomKI!|%YCA%!o_()2f|@Ovq0w^R=_T)p&hHC!e$vsm8NJ|cQ;|8CiS2@Ps`Lt* zdytETiml=Z3XvT}Q&I=(--uIPJZcFgob#?@&wAo5h^sY-)w-8?DPVr#6wCR9?YqVh zd>qC(qlWGDZtlAPo;xoQ*;lOrD&PJ8km~ei>#V^+Y4TI!?4)dgJmUupN!}PuOUk9> zp8}9pm3Bkn1&;Lf+x&;mrX6~?wzUYiZoTgG!)g9-5;rJ}PMMR_($n25#V$=d`klIvQ|q(h zX1IBJpdp^LqYk$EO(QyQJ_7?`$IZ}shs@OXXb9}y9WH&>aFVqzR=CJlF}4bq>oN+X zfb)a9j1V2swp9c)y=!)>#+sW)Wlf}4<(_X|Mr}C)*H18hysZSS0Wvbk4(j9Zgxi-^ zbkL&(DFoKU@E7XnDoAd~HScwc$;N=5Xf{algrdX1qkK>qWlhm8Jx5sOi#I4Ts*4s6 zi;0fhP(11?z8OuAKym2>7FT*vGG);wGq5MF?gw&Xn#LJ}@WasL?SlGm(hN^q4FIOa zwGO3l?qoo=>wTECu8rE;3d0n9OhQ0HRr3!QFS$Z%Izjw%-5+p$yPxKcxall6gk7O$QjSPqFM zc*SYm1ofaW9G{nnDNxzGWqDt12-!Se9)vhBVsJ)F&iS?_T{v9nam0fN&Z0Pax2@hL z%pgfY8cJ*LBdC-1^NhyB_PL|?H|=(ay)l49-sd&JDsfd%t&DU&tn4b&DT7y==89^% zTP6!y$%;l*h3CLhovq_!q-$BwQ8rz8txxuo-@E0ksOz3=9o6Yee)w_E5J5{VfVzhw zN7_gR*oJ7z1!KYxf%9ZQA&kWx;k;$V!mJU$16Ya+lChK* z6}0w;rQi{I%$MH+53uh<92*YE6C-QKwX4Fq^L(0{7X%enqMov0KYGMK_Lnfac02SZ zXtA!`woTOD35``3b#t!0RV~f*?Pu6ZlJueF2<_plb=46XVy{mPk`!L}g1~-K%HdMs z%-~7zci_Xdrj%(8=3vw8M^g~kwNOQMj{y@SnT3O{FsHG}9~wOp$2E~R!EOq+85$_{ zrP!L1>%-+qBYCx1j~>g@zD8;f4j~2X8}_kv3dgF_fiNSHbUwktUOv>rAU7=Og^e_E 
zpKHli9UNmZLta>9)#i!N++uvcStjHOiO7S}N?*-A_grmYu$O4rVj#pqfML+x8#%?~ zmEuf<)#R286!Rw~Z5xVzKbgc#*-WCyXzQ?m@{~Ly*B*q3_1N>D)Q_I(?`9RXxJqAv zry0AR;Za2{YS?+&SRYov=R}IRd2S6xUrVP`!C9ydzr=nDYX1yuI&%#gVS~AyO&K zpr&K$^XQ(n_P0$Tjzrd>Q){n`q&8PXD;(;Aee8o#YOvdt_H)bGgvRy}bEX3Rpbqn5 z3Bu<$WoYO1IeP!US6^G_##i|VmGZLFGa%CZAxGkH9ujNV4;x(%miDdHSJD-4K zVg*~;!B1@^`=<4weO3-=YgQ0yRL#V%5Il3@2jk+l#%Bzm>}q=H8pnYUi6VS&pg3VOW{R?_m20& zCDb@sOH&-BJ6}LymCX>{zP?g;bW% z(K%QY_q@7(2JlKq(>nPb_T#v?8AMytK3FsSe2{=|G8B}`$m$I9u-O>~*ycK)2?eDvkKB;aqqu`ST zVY)kRVl%q0fisDwn0fFrBB$3W5ZpCo?Xd z-m%|p=II_w0{OFs4bi0zXT9gaxVZNe$=#;#G>thcQ1@DTJI5@Sr}F1ad3JoYS)1*U zj{4i4L(fJb7PYfgvVoKt>$My;*z03^#(KWt(vX2jTkseburnSnOWaXa?8cVjp}eR! z9urrth-JaiWUdq9?_oQv=4x+Z>RuZX=232)+c{e~)>zqcAi({wrFY!9+41Ws7~*Tz z&7984rOH6NOjTWBOU6ZRzR0P$dJwdQubbxl-#^n2cO8cL$_il1_iYsYIptetlPf$# z+Y3hKT#9rr*u5EC@^{GfDbn8}QNg!M+LW1(VE_#@?M7&-B1Dg#GURMNMl* z%obo>1W3R(uv=YK%|DZw5ij0*5uDHcr_aAj`LF)xPv3v{n}4A{i$8VSeiL8!T0Xum z>o4Cg%a(Ggzey#nODWqLKTmnj`8S`If1&lfKKAFIK3_jt(&wLl|NC#hfBow9jej=A z>nHDT;^F-H<>7q&H}dDN|Kii9zxTgSpT7Al6u$X)U%vDGw~z0>d3yW#Z+`bT{qukR z{`q!%+n@h%-{1fI-RGbFH6})pIZO?e|rD!Q~Uk*_y79z<>NnH-#uUc{*P|) zhyM9`uFuy$TKC6|fBND3?|zE={_&K0M@~gl8`)~eze6h}-e*XRE zZ+`oipT7AYA3ykS|LMPf^V>iE_RDj8`t-*yfBWU@AAfm3m;LXb|EVoYf7*aZsvm#2 zedagoUoF4xfK~tP_y4@z{q>?Bt{<;Ie60IV>-w@a|8T!g|FAz_{=qK~@auBF+LT|8 z`2WA&FB@d9{&eeamcPp-r)y7ryZbeAS^JjivgPegBb`THukq}ojbESJ2`qnC(wXnG zjx(=0^>JS5OnX_kI{H~308vs|{Nhg!{&Ce&uczkq&UIVMSo(5}b=*%KrQGh?({e9c zTkHF(tERK=`&f?Sxzll_Wz_Pdy0>j9>wVtORmX80@2l4H-j1AGZs&Gxt+#bMp7YLo z>$T_dT&e8mzV-K2<<6zM$=Sz#)or9yE|dK@{mes9-ErEl-6j=DYPdGEFL{oZ@O zud|-6-bT2xx7OC9PCK9A>a*p2 zyV|+tBQM3R^SWN=(ZKR{U1#dgXzzQadfiWMOW8J0*v=AL;?8#H>DzTK+qhG@r=73M zv)uQ3^;C{>j(WDSZ_o9#yujA`ns0=IPbpqQ7%i*Hm0n3 zFXesJwJiC(Yguwl?dfN`&Z}O+Qi!beF6Xmdxh~TRgyNL?iCbL3+HS>!NaxyGZEb7y9IdBk?eT-HZ@o&A^Bm_mrrp@fy=~)cg1D~R z-fWq)7hqdT*RXH~wXb8lRx!L>4W_=YYG>caQR>m{ zWT_IfJb`xSxu-op$96w5X4&6Yjgl*LUZn64Mu+DsV5>aak=j{HY9%?eDVG7jZrf`< 
zi!>bVl84Vy$J!wHb@y#)+qsS@zp_BxRtMXkZEdB=uM32(2dpW50a@(|2zjqC4=b`! z4)k2l!`|f~T>wKr?!}Q+O9sjBtBSI-$iQ*!Hs)BKa<)B*SNXQyTY7ST);6#31WA4i z!vc-AtFB|;@0{~}q^Nwhw`_HvaNb&uK5n^T3wwOLDzSs0doNEJ!gEQR4SKII5z-mERLDiM}vag2#S@*KQw)a(IJWD$i#LEfXPv5U3bwxWw%x9J=P5rD>^sdU{ zXvdnD^B&4U%F+TXrZvz9H9sG_cuWyxm&&f{L$y|9!8+wqYpmANi2 zcJ5F~qz1rUle$$`z^FuTr`tXV+mq51y&k=+GOHhZZ(hh#a|2#kAAK{@XG1gt)jj2biV1<3n7;$PfpJ+I~2FX@6f-BH7p!f_uv z+V@^zU$(Mu5eaF%?_ed>_NYp|l$!Csq<|tG(^b!KM)(b4vp)#Yb!_yy{Cd`bN#p~( zeuakvUAs^$^>qmU+84z{MIUepB-)*{w!2ML55tg<4RLq`IjWErRA;>wMR$#$1Z)6c zdamsOPNnb8N5HneI^|`oYHdAIFHUkx81%&F%bJqM5c#{}b0nNVEeOVOj=fd~f zT#23zZF=jT&WRN(y6*jMDC2&gD*?B!yHt4J%W2*VQa4Il)|_QCkO9=AZ=(5t0hq4^ zysuh`3j(FvE7dzNKt4O?LluW!qupTZn9nQU<2Z|=eea7}`D_o#kTC3PZAE~%MfP!d=-sFlei;p9T`Dx<8TOQry-v8+Lx6RQ>R?cTVBtCvlpBT znjN@@ZJ>bVP>^1M*lqLsxhevRQ4*eqJnU<|!sW&9B~_xBAu6H0PUl4k(J%g;h*i7A z0b}X8PxaNlj)hiPrOy;m=(v1><)e6dgl)e`q$3Ai<^xbk!oJt(s3o^Ng}b5z9NXBZ0)Bs(ppn4vOdU)P$dJcFfKv z=8~AeeC^ZiGuu>CYy*cuaNBF7gY>dI72(FD#u5r3@8i5DK3`Gf@VTzc&Hz*j!tT(B zN>wkoP1rP=>zdtJr5&IWWCN?uQ&z6~P!xzXBv*F9W~gyMkbA94 z>uuTfy2_L(>w(NBAizmUnK{5Si{)a>r)%u>E_ZKVy0 zEKW;(dPh`2cX+U&7Kjz%#%i}y_LIsbR6Qbwhkq6GM+su*ZaC{HyelV=10}W5Y#R7! z_|n9C5v)Ya$=mhdSFnW^;WwpE zS?L*~Qbxf$Zhgdaahlj^c`Pn#L4S zOx?OlG11fOqLjN@l~up-f#!mc`nbS5qK3~z=;b;xa|ou=9Io`@)vrs)vPTf~DN~}X za$Xq~jDtJu09qRYS9FbG|77Vr_<`?x;SvN25EUc+BAxnWRcxbWV0o-6-UTygj;NKK zK38>z4&fh(?}FYjH+P=*+Sn;d@^vEh(;lh? 
zNhT;NHoZSAvQ(>pmM;orVk!_9torvJ5)&1datpYqq2Zu70zRDMS(3ZL;tDU{S9yjj z>_Thj;SynKD7BV$aAx&p@VI6Dii_g7gyeCNCE+#Hi`N+0$lJ(+%){Bm+02#)`xRrS zX4HCBxFvVpItTE^)(OOdn$m6W6*4aqySsuE5TlR7vr@mw-7q4r5DrTw-riR+B`Lfr z%?1AB9>~Dc@MV;(xBSqINyB4`sJ*lC_VWr*A|dGNbYK{CubhsQ2W`Y6sXO}@ zOlXyBi?dn|Ej|u7#0mN`#i7=LLbT~@G&K%NWcKF5G1l(k$;dUG0;1~uTsElj%$Ql~ zEY2I4Xg@WN2%82>?lUlas^*Cg7kSiJOE479L29g=)=OmU34V4;W@2B=D3#zTQo^Q$ z+d%Xd2z6VS=QGojMV$#?o1=YpY24s=kSrYIBVig^bX8csSI8#kM=4djCEZOA zhTYw=SML>@6}_n6SNxD~5Kzckd$O9;Fl9!u#-4+Uroah*S!ffAGM+9ufl1MLmBouS zbJ)OqEKI>=V>}LUS|N!AV4Rg`Wk9eA>EVqXeadCZUmOvFY*!&0elinl zN9-R~4)v-{qs&Yw8onR0hi4qN9NwaZXtgwB^7i-+urN*EE7S&FoTD1X2h0bRar4Yj zO~nb)B_b8@&8+fiqoU;f+(S(OE3A7P-ZW>}Phck|tO0@4^)nynVUW;ePIS?R}8dn=3$2iR7 zoBCqqQ>OT6J!o7vY-=SFVUsBfMb5=SUvKdVkf-yq&4eZ96ks2n8mFpD1jU`W1}Kd8 z33ZB%YcN{%y-yrmnP!UwORhUw9^P1Z zk?%c}k!Sv}ZiFhs3r}DRSivd98J{P*bI~yGt2S2I!-hxr^iY)_849&|20MBpPbE1% zHSLBmhpYvjp@tS`Uc@e+yO$nZ&Cw?gwWLUHWV*}Yl z?-kycS|@Ya9GV5uB4bkoAtDPj+z>=`Wt|lVz9sD_X#z5@3CLGD*TU-1XWaoXB^gL8b^X3~nGbK1-u` zx(Z>$K7}?)UNn=z0EbE4KeH#*#eDdOUriCL3Vw-B^s|%N6k=4|kF+w~uC)p7V4SPbs!b$<`y0Vbp`F%wj?SbnA}J*g-jGZo_)> zQH^y)x+z$WH4KOz-Bf)GWh@NU32h`U6TlX1q5|fqLFBe+8S=~6#3`IL3+LUq6d@)j zvO;w6RFEQmkB+B(Amo)!4G3)R9UEGqxNtZa-0s_^&>9po-p1O88n|$@kS7GlOkKFC zH(|no-xYH*Yzl%A`flMw;${Y#-uL2;!4ln3uA`GZwRy}MsmJZ^YNZC>F>s41=9}0Z z>ss~8jYDBoh7h6|;}&LHh=Uc~)RK7LOQ`LRM)NhSZ=}@(hU(3TP$G1f7}G>}ns9?_ z-wbiFqPnRucU|!TtQGeTC69iue2lr{gP3Ctk0!RxIiFL_138*yh4BKSK77HPbx^B7%tUAW?f<*JOrkcW2 z);T30KU%!XWDqI96+@9y+bj`nFL&N6WDKx}lhCQGNo<$H#!^H*kZo&5La4{t`?|Er zF)amRGc}|O#sz~<@{pcqYwMmly>nV&^uN^}QbjBQbXpbCXXqtkfmF7HL%4H`IKh1mK~=3R)D zm0@9&Pqe7yGMF(JgbY)e?$wamHD=&$k+%~~22w!ob@Ol4>kwn%&b+T$QMDK-G0vh^ z>AUHFbGupPIMqfPXk5~x(^Y)Q=txJUzmz+nLVTzQABBksCeA3C84uYydX9J&P8ZV@ zt9IjRN`-UGHU{Db3#86eiA-xHk6=V6<~0e4@UbaMOjfnPyVcHc$KKD?WG}ywoopIV zdeG+JEr(^P)*+gjKPvNtY{BZ-PeO=Aryoy3Dh4dmuhS=J_ldcSbhGt_L=_91nH^w*y=o5ZEVVGctt&DZ<9Fbfp=| z+-~O}^!%J?9)wj*)4jrTR&+S6L@4UUsbHiC3B3)L9-@iA@+;rx#VjW(Hhr?)kTT!K 
z{WMqHP5P5Xjsy*v{dqHNG#aK&xuGcRAEu}WLzN#BDEZQ-3ZaNbZ7C z(v{J}(%4k8~V}M5kDpLic@b`hwRdMpU#Wr1j6{p+8`=|lnkXA zA|PBgbd#=P#Y{0Tl|XrwK@1V&EOrpiYubrM;b*?hK8|gQs2CHB8Lx(9T?hl2hTpNS z;m*)0vX6FmW+@CL492XT<_=?s13=1XY#WTgtw2pMfn%9)Lp4PQA%p4=-AUBfi75ys z(=kRxOVygUe9woPVAalc z!4P98g)e7;iu{D4IvJsTKm3Up3`1m9EFAAJ`yFF}F%@v8>0T-dj#3zEEZfK{%#gjS zMTu}n!7%|6md|^I(#~KEz-fY2R>nw-p>JauxK+}`zpgL`MC&j(w&5DW-!)R6FJHNepm@nWoIQN+tCcBzffQ>1dQSW zr~<|yhgtjT3Z4DF*HEwkwFLG)E@%gk>y5NfQa_Rbjqu@LtGGSmHTzD5yIyT z#-a0R0wd=32emb=kVPYCwqP zKBYejBrsESiFX_xp`Lw2bd~s&?QYF+}L1{MrgGQURW~W z%qLmZL=byXt0yi5M;tv|QIt5P%2P71?xx~?)(%<|-UdZ(wbzLfk_3B;tq{{D zXTzH#>++P9ie6|T-MJ@VN$7^gfh5r1{HOIwR|V$1^Qye{V_|BKml@&HwX~88%WloGj6< z?e8VA_X;U^eBX%Z)5QcFR!G7P#fO9;Ck54msnflT|HTWnGM7Vc_#jA)S%=fOBjTt9 zOw?jU({5zbC@4KzB-5CT@lKlNHKGE2^s=;8gxWki=e-7*Vqh6OA#2p73}F0l0%<(9 zG*5-$F+JCYdzM~o!l2PLfDuq#UxtErrc4N!Q zH(n~_i0{+A_#mMJ&B1!Fz$k2Z$yv`#f#-`#1eYqA(;l+7y|)+xm?Gpb-#{-1`Iq;G z_sG#WYl!?lFHV{4fh=BSbC@xaCMV++zg^4N7?ycrWoF{@@mgMhq45)rfT`PPV{sd~ zGSJ0!^k=NNcf4t;@ zp2XOqrU!1EL=|FAXg;VtJ%cn| zOK(guaJtBx&dXe1%(8OuFdNNtqdabh`GQEUkqwSgF|%dw=qYnLh3)>idH+*q!yZCLB;D|< zmJy!BFY$hjzLWi!@{2l*hALOeTTDi>G!w;=hQ5sl9cr$LA7bMDTu7c*9S#lL=y*YHyCSM&S?+PKq|}oRYdbIGgxk`P5JnI1MbRKyi_ym6dasQO>iLrim>_N z4<3fn&{M!0`cvIA+OV&^h-9|MKz7(k92XM^!Ns_c^3rG#yxu0g?8Rjtt7pPK)Q$4) zYwq5)g(l!X1}=@sim&OEjT*93bvk3dhRMr_tF@2E;${nwS3p%l<{ zrf78s7pxnN%Q51cLuF0XK^|H_@LQBT{MgGI2b?kptiT@h^V+Z{$C^@#kg!c?bTy1ApFue+4`6{{Sqy BNbUdt literal 7061 zcmV;G8*1c2T4*^jL0KkKS-p7)$^Zr!f2ID^0Dw>h|MGA{R0{w1{%Qaa00AHX2nb*x ze+%6?bauu|U~I;kvuY8R*%c*in#S7^qfKeHsu@bymK9S*lWkO`74|ymrkZM1RbvK& z7K?1vv|5d$&h8?q!4*FU(-R6*Q}rg8KmZK@G8iBts(;`{36)g<27mxCjQ{`#Fqj0u z353FFfB*!^rh$s5-~gIZlt47l000000Tj`pFq$;=8fHNlO*AwbX_29(frOxh(1XmU7Axwhi^2_R9KSerQfA?Q+o(>5<{K-xauYke!oiaSYd~wh9hYfuAgERxHLr z%NctH?0^Lz0wVV}qHHI%i#vQi2`KYrg`PVv5Pt2kEBRy|>6SV`*BGJL39$)2GYZHM zY=lQG`#@0F2|#z#3PeP)l2ubBdq}ZSX@+i7J1ve26k%M;DxV)tIDy_6k0VZJodN!(Q_1GAP0jCCK`xq8ViuwvyFlIZ09qw0n14Ar%nn z>{xaO1Ub&cw=eZSK3<1Dn!+Nm>6hpG_&E7?p`M0DYIcqd 
z0uUQC^hKFsH1QSHb)!p&e#x!@`E(k&AhUrDo85npw z=apGZVV+8AHhs3|YtUV=>i4s^=`%=}KE#XyjinQV>LX7T*iOM=W+TJfi`5rlV@It8 zj`MM$?1tr9WN&2|V*a?ZV~erm4fC~6)iEHFP0KIB+-Y1>~2dI0t40*KjoZJ0c+ ztqFVp#im>LKJ{Dj^hV|Z!0%j$I4#qAFiqC1wKKqTVpWl<3{9|t6^H52C;dOq92Zsrl zE?CC%iRVta4btZaTdfUA%Byd&rC#=hgUJ%*I1EePT`7KDWpAQfpSL*F2Y-7YlX`Zp z?A4CpQ3;dU6_0%$WtAdyLPTE^b0Na`?IYJ9+~WgFuShXA7X#TsWq|Ru1)Nj}YA=~4 z+oQ0%wA9AHbV1lJ3xd68@pr#9x6mW3hzyYohdhw;tOjXo&8aq=SX_u+v0JZN8pTB3 zP~orzf^qvzPS#Z+)wSFQM3Zvz;j%OPch#_VP^hrp7Q2bG*`PwTT%IIYUuAoL)BqTsh6wwmt$n?h37Tcp;hHeo>r&qU1h6-mv1wTu)D7 z)za39{1;o^QE)4){{uD7YNNg(0cGkVb73D^SAdYIz-~oO)-?Gf{bh1AzdC8K8|!fM zY_wvyn=Xif6#D_lruFFGfvHa*lE_6q>_r*9xXATW!|Q4-Y6q(}vMY+(51)C|3&7|! z+giaH`mb4bj~S7h-Ajg&qBtp2*t|=WHFgUph3vFYw+>(%m!=mhZcQZshY-19=gKhV zv5j&_*Ot!PkG0qg0Npa1*{$?whYR@d`O+^3jClkWm zj_9gnCaNCIk0gXTCZXeL!Lb~lJCHO6Ib01iU@oO7YM-boq8mz!8PTNfgm-xGoGif5 zh8Q-eI^Fbjai-4c4d4xyofRJ?z-2jDeB(T}(Ndooa&7^0G~<@st_L_vmIPrGqv*%d zJs{0Pa4vQyTW@ce4!g4H>iXl zVG5pguWfD?EP6a7?~#Uq63&{U!3mGPz^Gupv^CupJ}TqSyQZ3RhaIz7LKqo@AbWSg z%6emb=p*GlM@}^IK*z{9l^79j1x!83hF&Udr3NQJX70EmA%~;53dS!((mg%6Xx80( zT#EH20Zi2__qP_1he@sR+1Ay_8@_8;WJ=%&rcFeK)0Bczz4Y7MDnA};qMym*Jq#8! z(Re?m*sv+u=Z}sMgocImK}#EI{gmq++IO9~$kt6QvlCKT7yFD^Y+P0G@wwMNM$OAp zw_WAhy{t3s2I8VFsuwfCw*n!?d^AXVJ#vyse>SyuwR1%k4qIaSvD>akx#BSd!qb)7 zeq!G6ES81bqU0{Pj&m<7SLHTP#-b*z-R`q~-T{yDPx}|r? zNto9U2zhBsz1^UiFsg}=^$49%;zy)RNKkQI`pf{%wq2KE?+@Kb+YdGkW;vfi{IV2C z9dBGX^ofUG)FmZ!kja?6HVD3tkL34w5{<7q*VWU#qSp{cKTMKmEnkMg+IC~%U{Ja! za50s+>3MCQi*acb@onB0I_U-Cy#|yaUv#+>1+>Y-JW1=a3fD>J83x~`CPe~mAk9HC z9=lVflg@;|XF_YfSLpVeM-qf?ZwuLpstQDTg4o}051O2q#&`Cud8<^P7^HM9z9R>C zEYKRidM_Xj)l>zv!r3=!lqo(9I{T%<&hJ$N+EONX97d3OWp4Wq7Rc(5ja-1&a!SH3 z2c2`5t7CZD*R|z*n#;<$@q-gcCI>)Rwcz!XbS=U0ye}x#bV}>szmvqI4HD@0yLl?c z&P6rumMswt-6wJPY6L^VEO#07a(QE#C`BGJNJ=_4W%p~YB=i{sSXlM5P3NGUAEaVoR6hbWa!kpU2! 
z^v73prfI>*&i4Y%`K=PoGh9gGzEZoz{90?^!`03~u=!5}GBKmVRo6r$hBRAi_D5nQ z;!h?jUQ%R~7rt@h!pFkl7YJea8QpB@b(tRa$x+3*hHygI8~nbJ4m3X#W= z%$}uvX=jy3dOhiPH#TU|)46y>V2;pY1xQGSF|+vPZVM)0jPq5V}K@tslR!CYPs~0`TDlCX-uVWmL}NF%(k5G7bKVzyM$slr(~G2f~TG@T_3m9BovZwUGju?jQB!9Jxm>L zsf=}X@K>Gnq?2qw2DZO)T&3BO)UgMQ3=$D=8e_sE0p=O3=@3O`)#P}k(S&*2@N7cL zV>JgntRu>|%qW~9&JnitB+XGQA;}HmPywyeK@U~sP;SWGXma~uh^qwM@%2w3nT3wn z{mFCVFjf?TNpGzu!bsZOMBat#S0J{!Qx%yIv25*Wg!Q79QsG*20a-JTyvo~f37GD$ zYih7n*xW0KJQGcrGBPJji+w;kqx^w#>+j)3Mww@;os@jXOSPI-O*pNf;qfiamBqP}w@qx2uMR*mLu=I)F z5><4K{Zl;eD@qMU*cuTNu$_*%m@F{5~;R z9``MI$Ksx*b&E%V6O83OG`q4j=r3=kCa5%?+NRXa%);*7oa zBEkD?(#n+a_iPhnaI3k)QlpHxF0FkdJ6q(}8N+9qFbZP!{ZROIWFrp%agykn?j7?Q z{f4d_$7WTuPARucMQb%jJQk*Im8}YB@PLXY#A1$I(l|h;HKZiMKq=mZ{ z1>`$usU?hzN1k305!Lo(t|j@=whCuAo=9A;cL^E^uJap$Rj|EW@2-eBWOxtwSUFJhLob;81(bS(3|_n;068@zZRRNNkpjE0u+ z2$6a!G-}i>-1uJG?%{&&b~BfNr_jS-VVS6wx^}-q%yz{*tR5Gj=Sopc+Mwq{x>OI2 znek{C?T=Ogsppd%)xz9%1wJA;b%)8K8794n*`s)WcHNtkSDIWUfLsoAt>|t2)zhr0 z?QqPPz`++BCF2zE{kw-?HJ($1Bd;7EyVb!+rbNeby`vjA!9FvENGB7Eym6bNceY%o z9HyvT-raHBnh;?Ls^$lp+{yP9mq@L1c<$xHxWL8i_m=YL^dLGojE`)gZkJI>P{8VZ z-L}YjBAd)~q|O?~q7zc}PEr27%ESF`Uyl&lOK*jOedW+OCI=16CEEPfxy7ZwWu9_f z;fx5I&7?IDTy+nEn!q-1SAK_4m=|}IQV42T4@poNzbspuVP%yUfr* z*RY3Ij1EmV70`S|VNdHR7};0OwH$DHY>g`gT>-qc#;3~$?w4F`pA)XFDwD9qO=K1- zo-cYk9PG9U`?I_Wp@Lm%l(phU7Iu*lGbYI&0Sy3o7c7pWCXNg@yL@{(8)c7QZqnS+ zV`lt5z5*>yInq$s5+!eTI8Mn17EVh5hdK@VrjS%%Jr>(nGevY+#VRQn@2$>~oK!VrH?-y(>@KT&q1h_%0)}vGPO_OsE^nmtWUA&e;Pt(Yc%!CIERR?(JjV!jcqQMP{BC z9!7S9%~J_OX#OU*2RQKatse6-&r@)M;tI5ErEseF-Hwpv(!_Q_?|Z_Y1&o-ocL9kdoW$>m@=%u=y=A!d=X*ZBHK~7VBN?9l+E< zfc%o#of!?aN?VL9?)R{hHQ+|72cI&t7~TfGWg?0KeRX>w1&w+KIsEQ#L0`4 zW@trGGlVP$phCK4#6>tu7?t^kh|7>CG>q>*4e}iMU~1fO;#2kSLtBwq`MM+(#))NB za68`6KW*DH-5E=%lS|n5L{$j%-Ev~jrSogL4Z3|lL0pwfi_Pbl0;EXj!s4YpZv+mC zPEM~7NVsBQ#=8OI)9OuNNe}$x>}}T?~l9 ze8{U~u0${xvDb{f+-0`HByQzd1V75BO^Tgfjgh}D-@NPe1;_%DUJ=XzI7cDXVeH*i 
zloud}1Xd?(v*z*DhZ7`6cEMM*S|>^5fD%8b&IJhmzqlW7`N{IR-+CGmQTUCo-vkY^~5-g^uvw@NkzQ)+386JjV&TPKS>BO*`k#}w_27Bm19%|?-4t?^(X;@#Ft zYKLkQhy-MCxNiI!>^7%`snf;e!c8L1a4hW$PEsqBHGb)C`lE0z3q!KCE9t{Qr;P`W zw|m{)s;@#Nhm?e>F-=KaPznmJDUP5~;cJZH=mu`lDY#IIC3%RgL(aju5Ou_*M)Hh% zbhtf}2lZZ}Gp=F!+&wGZ0@e`iZatz`f1O5_TV3(rK+M(W8Q0SkwVNh4OCK% z$ZlZN61-4icA1JwB8KT5kw>_pI7bru-fdbB;s{QS1&(<`JLRoFvHQDT$xlMux@An)dN~!)5;%Meb?MY%v4Kt%%o5f`XFY!e{J`;FAi&gj zVpEbF6;wquP$8~A0%?S(#A&7N&t7%z=h|9be!+@!riH+VFjgUMJ$PG+TMlYUte$}4 z5koWN+i=DQLsn|p`UT`Cj{?r8y7M)Chana|B~>SLJ!lBM_aZF-0EsK zS|QlXO6@x6mt8z6%qrzBw-hc>2h#wH?xXf9*hR7b#tD01Z_lck*>Xr)k{gp(4QQb8 ziOJ_nlN`!y9RUxUD{{7T6IXITc{udL&h|_Z3cK0PG@g!&6rV2{O{%gq19yWxt5V?x zL-Qx?`oTCGr}tF7h9ac-v0@U=NJ}tyY2F1S5-zmDk2Cb(j-z^85BLg;X`vi(M8hei za0q_qYejfU2lXw)ZgIAkglCvKrXoqUp zt1_KOCY2?MWnM8vq={!;c_I^|HMp?>mCMX^m%_eIO-yE#hpFu!WUZ?XdoJY?p2(Hf zQtnW#jfU14m=`NC`dwg=-Z;AQbGCb_leF1MgV;?}>f%Z68h}%n+=;r9QzM-0G}vtp zdFklIba}_JBvC8wj**6QZl$!|Tq2qA2PS&O z?2l6n9Rt+}0Hm`wpfOoeBUwT-k{Q&SE9)m=Xdtr1w@ylV5|ka{bXIs6g(X23eG{cX zT1G}*58F=AYuqfn>h#NmXYE5st>eaVjc-^_34v+59s;5;*?oA7LRP0l*xn(`w_Aw( zdDEf3RTZh*t?>q$z>uwRr1mXT=*1FcGN#0X)NNoqq19(kJt+g=K5Hv!ZHAX+nTX!@ zYCQg_tnJbg$%cSwtYucjj=@%2r9zNit;niNXc0V;JAJadRL1nFhqYYY4Tu#V4={ME zagE(&(epzDr~*WR%Ixc8xlYK~&Z5>J_0rCkd^ZrVCi?TfC*LTOF3FCBvV8wt>V64k}r=&;}~gF-tC_Mh-|vW3fEN zb%KYto5;KuzJ|kj@h$$Zf1Y` z+~T9%u`Gk=a4q6SA0|r5K^w{i#|M*s)dhjwtDWLyP86bcI0<}IVBwNnVQg0CT@E+X zeHGv^6M}ci>x&hkuXvL?6zah-wh~x9%fD-OR7k=s*!@vV*%FXT%`)$r^ddVQTYaQ$ zsbjJ7XiNJ$Ls1#(nY?xQEsX4L@|Kgud%iuBaHVb2zz_j9U&GC*`u<2_FB)f?Db|NiWx86 zpmuEbQ6&qO`r9cz8gy68urI>6PFl5mHS9Fv`5WR`O_a^*+$4NE_R&=5*y zAczQ?6#1P#-Mx41s#U92RsZ<>@Z;a~^I!efAAbF-pZyE{)%k0grk~}X>oR}-*$vbjrxb3b(RJPrBp{kV?Z&%W#ag`TJRwZHq}$K$si>Bryw@|VB(b@NBfzxc8D zZT?I9FL^nCeS10o{D0)Hzy6EwzWYc2|J`@r|Hu@+|5tzbH~;>N+rNE(Kj-iM_HX|7 z`SIWX`u@fHi|76~*Y)e){p!ab{_X$#55N2S^Z707`S$96{2#;jfBE@^e#LK}_xbJf ztLw>c`sqL4&kxscfAO0ifAQ;I~5xzB_;U>+8RL_s1K2 zcl_%9I{Ja17x%ChJ=&%3r z`+uE(c+ww!_sbu@|M_2j_x*qQ{D=Sa@BZ`mKmYyD|L`W?efRr6{I75S{Qb9AblHFZ 
z-T!Xc`adl}4%TmfbNQX$4}aDDv<9B~?|%J%x57VN^qb?i$KQXR`%lmH$Hn>M4Zr)- z3jN_nzP-Xv5Bz0WzHRjX`HBCqL{{rhH~+r-hi=&VZ5)>8oR+71`gfYH`I)bAe#hf$FcfRx6_x(Cu?>k-Hz1;V@Z^PU^;l3}uCpf!fT90#luY2s)<9*() z8@K1~hi6~k_Z{1--fN!vZF-jHTHo%xmTm0s@#&Z2oQM8>y6!lSZYlQ~`gtA>Z?#YJ zx?k%$981@~-s#!T@tXVTy^nqQ$>u&QKkmmd?pHVT_j?}C_8x9N@8h!U<8kf%de6@~ zFXaih{r2ATdvE*j4(EGq=Q`i#(GOcUoy*+6>#}=__Ps8);l=#u@jUxBcIWYK-TrpF z^}Oa|?q)}RE$`KD$N4UA=a(PXHg)%W4AZ+Xm2p{z-O^6m+P_zOuVXrgYng|0>gK_Z zS^YT|z359eRuEYRPJ?Kw_)wP z`g8ByxDVUG_Ljw(PUmrS`+OejyB_VmHtW>A!*20UN8;Ysbw7G1Su=>jn0Xgp0it@WxLLEeV41Q#-7u+t$CRH*w$$orhYu;t9v}-?HqM$ zzn**9<$Irx`&!3q>8@$$_Wow}+y1`K{&4c|x^8cOE@%5*i`Sp+?y%U~ds>d^S(oAJ zkArFU!@vq&#!=SL%l7ZFY}-A1p>?z3`vU{43)ASf0eafkvj(eCUzQcLCrg63i z-TZ9Fcs;hhKc9ZvmhCMz-;bR|e~x?Fw$YE;tA1Oa`?lvJYwo)K7^k`H*LioFV>CX~ zbU0Iv=rrE@Fm&(5H_aaXJa68lU5)$cuX#W1)V+1faqstiozE~W*L3$(oEvR~in9?qRBv86}vuzo6`+Teq?=oK9xNJLT z+zrRTO?F$cjqYHsvEVqHXxO-mw?gWgtpP#;c!f6?&?i`pE zGw%hiad2#ob#k_y^I?CVb6@B3gtMb`9ZvgxF2}Y_@70fI|FDg186P34ch26k*v7Ii z)46Vskg)p0XIkIka#%KJtm{tycQBQ;JmGMk{k_?({y3k_&!6n!SefDd4&v5!JeLUb z6Z-nZlyljpwYSr2zu&{V3?~bA4i<<1*sqrp_kKrN!~GdA?qQpU_Zbg6{ql*n?pcp( zx{kH)_=oo?r_vw3BY!uHrG#&f>n#kdaTUL0B9&&-N<;@3f@ZRJDwsA;h(g4(r=%Tqiy$9IBRPVBPpJMX!_ z{kz%e0Z8J5pZOK0m#KZiV|iA_^e*SVt#dyL&-{<`!BUQHx))f36J6S??&q0~;ho;| z?hhAsj-uf3aY22s&gdujs`e_VX1nt0esu0HHezq*&Y^ci*VP&A?)MnWz1C$C7yQxY zqzfj;emsjKGlDezuuOaryRY#}_}n(2(x1Nb=A3aOP{Y+T+>=L5$d31DpYR^Gb>+d9 zb$*uXa{^DE@qS+~Cxn7~{J>Gp*E_ktAapt;Il%*O-5vAoF%M>TutYC8SKAO-hv!&C z{PTUbZtIuP4)9p~92aai1*5$@A!q))77{8B1hrM&#wqYVqWbQbvBYa1%f0L_8}IoR zXUoR$kNq`t7dSS(`+AP&xq-gL+iTFw_}-$xzV^@4_dx+1;2|vVkv+$IPIZ@` zbyyt|*np`{8#goW`{M{)kB~9#LPoH8i7BF*1$!XDXX>579w28Qo|LDQ^>GH?^eE9` z2C@%uaO?&E2u{Dm{t?2@x=hcrj^lBHOznF~6y|q5fgo9^>_SF$vo5}or@z7LN2uJ# z@`NXsv3F;`$cjW;KQkTz@(9ql*Ev2s6~|q$jhqVkIt9h zQTFSV^7ebrD|Vxhzy;vQ;8{W8{yGIgDI@k`onURr;UnWL7y!n`X{=!Q8Z-}#Uk-^R!!xhQ z1KIP)y_Q`bCG^V(H}<=TD%s=9oRvrVX2Drb-^1MXz4m^arPMNUIcVqj3J?O*Ydvk4 ztQ&%J42sP|W+&f(Jn5(9n->-nxbN9Lfxb%)w%$kOZCt=-O0GN36L9aG7E}U@_0Xb` 
z6naJov}-6(Pkqn<`vw%bu>^i;H9U~s>ncXq^Dysf1ACkls3)JF z<(fQ0Uryz`36BG#-WJKl^nym)${NpQ1+~WwPQj5>Yy%f>g8d}0=IT%VL8W0pxWdL;TP)X1rP}mQiN`5+m8!+Q>48?EU#{pR!n5=6Aw}+=o(k3@Qiy3H6C* zuo}kfh05e?OWCgp?BxP%(jjawgG}AY7J_bh;UU#u?ESJAo4?jcp24L95E#gyAL~C! zZD+0^h;Oyz9QBGQMW(Opd{h9$94h%%LTX3}iD4?1mK(_Q8gZ z7`_udGs^*;!1f8D42h(-4tf@>v{RZ5*<1`cQE-n@tTvEj`~6* z0zyuAQk?CP=x^nG!7{vq=Nx3)r~`+1@EhLe5>fb&W0$ogpHz?J+2Z!IwC*_r^T}Utaz`DY0Wi`+R#|b;wP!#WCn;))6 zED7I?r$kiaOln4t1VOIPD_eIE18PR+9o;k_a-YC;gva*fR64!(-k%B+*rKG@IBAq) zEb<-h`8|2;f&p%s#+g{Dwh6o=v^WG2GGUFIjMEcYDk4HIq9r{QJ^~XSR%!4ukCgkg zb6jyC^jy}k3o{O*Kxa2+B?*4T7tZZO@(FC>Ij_+bZ_{ITw`1$MKqPc*+_60ImU&IT zDGbsp2WfZNx}JOT+f@qe)W~Pgd6ru#-ynZi_!hW0i*4Xa;0Z<%c1_;P_Ckz8_E^3H zNU@LiFc&Whq{Ua?E5dTMYY0@v>V$bzEIQ;8YP{O~0dqlTHgx4k4}ta@hF@VfhV9pl zAxUbivwhsz$+=mN3$c;x<&hhgn35a|Spo)(oB*I9omqGf@DShNKGHL5 zR6JXUlyrh&Y|hr9u*Zn4ezzvDghK+?-(S$z3c(`(!8Yir--ZRF3~n(C1~DwSck6jS zWequ!L`r`yR!!kM*G2N%tEDk&?h)DnoJ({VnFTm0iy2@_bc3Ww0brLysH|Ywr9A?3 z@ob~?)f(P666@NrMEC%BvUC3Blk2ja!+={TXW8D0hAecNWm`f7#B#DLh(M|t8@s~Y zmiJnNv*00h1P%v7nG8;rKB|WyR~gr7x9)PU9n+`~b?zLz=pB2&=iTQCDoU;ep-YfC z%W5dc@qWU|Wsfno>W6j>GRG&q{3E7P4QDJ*_y~3qV%#YDUS4YR5Vi?LJR(=BjsT^e zCBmMfIK0UY-yKzkoRmK!5GQ+wk&(p;0oPXEOQs>8vt1&CmzeP#ZobN`xSkF^k;|@P zV0)H7Omt{xc{L+?XVfK1RxThrMJhgOHy*I;@}NKn!gDM@nA{CWf%%v%3Ih<~0noA@ zq`jB=fEO2Ik1nL@9tSRWn9&FJ|Apjj^;6jc_U>0>+Y@oXH{=2Av?xMOgX&>O03P`* z_qx@hp_8P9avHq6OnDfcwQgGFSKbPR%oDdy7_MKW1kSN}AlD@cOr5*E7b+(yg{?&Ysr9@Hp6lqgN~=%k8!0gW zR(5&r7bLgCAT9`S{jACp`<2!U8qy^v`QirJdr7|?cmZe&_cUWA+$;qGG^v9s5Ft0V zsd#MZ6W-P0<9WH${a&nC$a2Ldc7hUN9dktvy`zXHA7w&L^PzsOE)OeU0M*%e6O^A~ zUwto?;$yyF zvH2ZeE|;@MFRB9aL`3u0w(AigRX*H3``51FtImiZW*IBT#L*n0kMu;qP@P6$DO>E* zms)^IbzBXan~mBA{}30DYfCT7dqot&r^*s@*b35)`F=^470#d<=S^WzO5o_v{;y*s z?Gxgx{5K4uL(e&>bHbGTyR-`a5(3#5++5yE^=`!WTe2CQNdX`#Pby23Q-FO5Lp2oiBwe) z5Xzt=yd1FrigSx&6YwM*4k~R7IKe+`RUJ&VxoVJLg4SyY1>)$@1 zLx|s&HN*&TMIbIJp?9WxpkW6t9ZL6YuYzn59|^AK2FzG?WC(b|2O@7}ZeoLEr1)O$ zMZqOF1i8>bci1ung}{upg+XqNlslJ`B2DVy4FN0QSNdi>H^c=B_fk=;nvV8PVkUuA_EY)mB8B7i0TWA 
zNB1*^n8sTIRS{Zs>bIc7HYtKL;Pxt2jZNUeJ8{T?R!CGGcddN?Zkg^&R?!y@$U<^zrKL6Y>BO zNc+fTX8{Gm^FN$a>*$tM$l1JyZC0`qH(E;bGFT$qS45TccwAef*2}?0D z5zCGK#;_A_u`E?ttl3RObjcrzpPw8(7?$|V0?2|#wqh9quVlm!n8lO{maE(xxW;;^ ze1*lfugOw+wG!?CUXiCGFU$FoBt3{YM)>^&$aN|3T65D3E2bd1sQ`CG0m= zjS>ar8(PH6EHKY;C}=4@RolQ!5En?2QJiXS>j_`dd7*xlF62rtK8Dn?*db*B^83Un zDGFl{xg}=?Qi}je8Ql@!NGoR^9ytYe#{rYN-sm~0p*m?FW;;ARIzi&mt|8@1vd4MzNSKUMtNR=$ji;%;4C7#tY1qanxK>|is{b}!&lu-E}SB!(j zb>O*#pGRF;sJ_swDxlG5l&fep3E9{l#fUc$M9A9YP%05pU^NVgRv2!tIzbz%M7b1j z@hZp##nMd1i&M42aBNZlZLjh?s&mR@o@12GXgNskAMne2paw)ubXD;i$_I!P2|XL2 zFHN;0i9Z?Uqs%X-M~9PflqVeJP2H$E;q-%pm){N;9@bOZ%M~#jQK@F5aE}%C8seq= zdZ1CHHgV8yw3hmuLaS(b70v;!P+rHJbW$c<0N046lYb>rQmoiEu(_@VUn#ZrcX7lz zgl-e3iK;baV05BOmFHI_h7*2^rHOh>JEYW!eaYdbnE=2O=);n>D^Hh7(F~P?)|e=Y z2SOD}-9u~={Q&_|CP=5f>ZjZx3L4CABu@e%PB|v;;tzr>KI$VE#)XLdlrHLw!Vf0; zWna7nHb*=swJI1OcTlTH(rUM;oO+ZNyoGLjd}H8r0H zzX*#JPA(j9MMP1DI}7$Fn&)kjK`?`q3LS?z6}V^?q5{GBIH(c-JmsWFP_h;TO*S}^n=}hu`-H%IP>-T8Jph(R%tBGXvWPIb z5FSIyNu=FgwJWhO3593b9;^`Ui<%ulO^C9OWHLICVo@{%Fvy3kW{Qz{jQ%g`2POX` z(QNbKo!fgs(+ zV{%J`NnclkDqQAH8pc8JO$zzY;iMu(M?=1##^J(RyWuhwTA>l4-Y*?#s;v*Egmg!m zkAhHG{lc?JHI&o96Ll0gh09QD;M-KBxMWZhPr(REOo7z}fk?{T#e-Q>6%mJ0t^;pE z6#gei(C(LGfLs)@b9_1K2-D@5B+ppyc+-@b)T!I6JOBk$vVC#`zgXd%U?DwDs*l&q z!2AFjvAv3zPIL@^jNf7TBsu&dvr?;~Y3bF|(`i!97u{(}s!~cVPGkqtj{b;HA2Pi~ zYAHU+aK%xMaMS{m$Kv*=zd^Pr=cIiJEMy3WY_!^joPrXNQdH)IQ$nFG1hYv2@Q0}> zF!|ZLhCPoXo{_N>LKEN} z9&st}r7i-mOKXO>HAy7f<9-n@XchaPem5wkFfa~E_y`Pd)DV_aa}4RR`uDynVygb2 z?Lew__|unZ+-=Tr48yBydf7}FRd8Bjc|e3hYq>p~~*pn@jj ztKB;Av1&410tNvcuSiEX=ynP?3QPzIj06!uc`pkHqLM}`SK6Wp>qi(7p`D-y&Pr}O zKA>1{npwiS3g{>hh)5s;goHgroyVnsqPeMPS%t^1r0ug7_yUx)o&yL7dQ(T`;vwa< zPn0#BBK))*=o)|_-Ft+KL~2f4N-@zh;g<_#-+l$al!bGY%u&-XrzQnK8@EuxcNlNYHV$VqIC{~euP%~A#wN+hh z_X{Y|jGC&Y2thO_mGb_m4y9#l!FQo?zVP!Yvx*_0+lYA;sHm^h?b9FOtfixjk`PDN z-U}TJ{3nI+V6+2jE64_Z)!l?b3Mm;U7OuUDtS1dk&KUHi1j30(s0g5?9R4f{28W{d zRzTg5?k*0E)M3=YULta0qEz(s0eNe-fZN-9g<#WSpj>OsC+=tPsYI 
zbbFN;EWZf#84640$7Cszl)&s8TT?-n*PUhMc|j^(5*?X|LM2K{ok@(*!kp@3>@3~9 zg=Y&fVT)u6X?v0XdbRX@+KT7{6&AQ4ptgNNLcl129ClG9JupO!QV=H?nHYsEmYUk4 zJR!}6IHidbj~Ki?Caw~6+W7^OCE{qUb>`Z8vD);$a)I)Ousk6+q>OGxF7KkxQ}&>8l|C+f9>$m^R?-m^Sa~)ajmVErPV`VtwR9J6ZSk=DC+g6vxqW#; z-KO%#85(k-Bt1e*PVk-JIMRo#3JEF77GD${S5Mc$0k`*ZnniDVBEql<=qgD6bm z*zitN{l8gaXR8A6h1@_EyAU~`a8%{#!V<;4}BCoY}xMi$oa1vfa`0|nO5Naih3ndOABC(-r6FV0O+{w9#IzSE8I!PsV zuzfEzFA4y~D5w~Zr!|A#i_cb7MSEaMFs2IPWer0ffq_&;0T9vb1(1`TQpI5Qv6cb_ zT~>Q9#UYLoxj2F5>bPj4lzEj^Xr{wF4lMHv`VxmsBfu^ZjL9OCM5+0pL(wZXTNxKt z=JwlrabysCs+A14OUneHi^x?2W@%AA%bER}_Nq}?jaUhGQlkiKO>4N?crM@T&}oEo z@U-n!S|T-NMtY(tLN(JHIyZs}@F30P>hbfqg!ZcNRAi0#2+>Ul5m+HM#ZDH9+atxI z+0#7IUgdXhbQ5Xb_Hd-(j+qoYGXvZZL}hS2;mx05)q+vzn3y4JIB-i3>84 zwn=xsvibr&ZeAF}7r0RIodULlDHZgQEC?F>ees8225uX9qIk&Piz=`+KP~G}Uwi}EJ)BCkakrQBR=QVzq6B)=1obsb56;LA__B|JLL zNKBG4m6d)I_?zG_y|fqv1tv|9?GwssLqMe^SiLHV8nu^#lKKm2 zAq0oAaQKSvRqx?*om&DYK8Yhx`T;Y|agbfZ+yuYmbR`negK*4z0P9r~Q3YUunG3=w z&`0UW6#L$?hL8|L5JexnRaBD4AT)|Gmef%thD(K|_U!YlqDK1Or^uAmEW(G|J-TnT z2+W=gmWyjwgEpSWL|(<|!3&C0NKtNInNL~4eHn>bQhOC~f|3bVC#frhi}iD|gzJZyyVd$2XR+L3 zlnCY_qC{_>kN`OqBncJmk4_|QIs%DMh8o6dqo&i#U)NB#sgk8=fK;Qu7$`MP2b5M#0b7+Bb)e#lVQp6<&9#oZN7G2?Fp{*Z$fG(LC((%V z1xC!dX|Ljw*`O@aBBU=Givz?f^1+B02)xD1U@83F zg{-o@m%132hpHCzFzN&aN~Q3>Dm)Ydl*`Ak$YtfjvjH_Sy-O*kM}+0Pb3E5anUNY3 zQ4ibJsu5&Z)K z?GuJ8H{T;-E&hdQK(Q+f8*&@Vf%A_K6FAFNv}chS5U_CXx`#Cj;vrBqJU*TZ+W~yl zN^wMeO3)&R9tWT@!Y2G7e6Hy1T9*bkmQ!hG(sV_{XVwo*gaHUCdgE_R1F9Y3Q55BQ4#b)15lIMK>x8=PXa;~F3rVj9 zZ#Z>y#?By|s(d0UD*CEn!S%>TsM9&|5dWe%8H+l zz!>DE3HelsQZ|$^2r;z2navR3?Dy7F>A|A>ltyXHq0YDdq~iW9LnYLtbc3Zorrj@< zVZU5aNOVPO+HZud)ZRX@E~UL>9467VSH;G9kXhmJ)72Ag)`y`=BJ3^2F5083tOmM8 ziNd<^7I3|~hMKNxtK$WRPI+f)K)3FS8LRjgVmc#OWX$wZovOYl0G2*+69i~7| zeiXBl^&&yJDJ~8rm7cC|CY2p)-wSJSs@8%g$~AP(#1T>f!e05IB0oN#y%2=8SAlX7 z8%2skMIkB7h$$>SKV_e(tFrDY{q0pE8t%lz55w>0O>+OnT}Cw9OVntyYN5~-S)klTxOx@kQ%S%#yMq1H7220t^!7jN?3<-zPMUJ z-?BZhJ_%Nesd{3P97>Dg2_q(`nhNz`aTo1EgJPnH^Y6M(t 
zbeakoks*6fJV_ITJS2S?AQCa?T>QLd7m7*s@Whve3o?((H)&MgV=IEZY$I1)85Ms1Xhrc zT1I&AYn7F-uxbbR)tvJVEU0;9cq^SzY@0zOe{k$MBAnWXM~nOm zwTHQs3_zqRv#GvI7BOwbdLhYJxa~>JMdVa}Rrj)l@GjRoe7LFFLx-i+axw~fhBiq>agiPHnFT>2n^?6N(Wa; zx{zm9K{qJ}fXG!S!fa_AZl6$qSN28n_b;NdJ06^M(wyq6zGNOBpxj;C_gGi=pi35%2Z8Jb9&Rk@eDiwus2ZVKEU zC?oMxjWh%y)l?fn$^;3wYslK@sI>PY0sMeQ+JFJ2FNi>n$pm;l=Gk7Qm1UIt$Zbg^ zqzb~4jsxngS$at7DyY+R^d*{8t4O*eC29DS+taGb1HkkUqQp|Frlc6PdYkrtkA;*I z@K8~HzevYHr3|3IV?!bh!fE@2s1~ee#xn`PQd2=pqm_Dq(mr&e(gwe9lI>OGB$Q1! z8!3}MHScD=UCMk=oC>R21ilvo(q83Q;q=gCd_1njtr)S#0O0YjoD!)o6v)qD`#YosP`XrMMB;WbNdT!^ZNVLX(p1?eFH zK~YuDrZqHkhn4lY?;7+6wMfS5n-O0SHvm4wV!2KPN2OPJBWJMa5%& zxe88FszE1dfx{&$K1TYIR#;h;c^&-Y7e$a{BGMBMCGn1lOLJIM=DK~l|46h%v7_88 z%5pR}>Va-vwO4-e!C<_*X=>zlghb$MS$X>^yCRcA(yEz=P3kU`J_tm!PVG}G@PoeInntTo40oLR^f-U(XZAE#n3}q4Pt+dlFmNOMGPi8IL0edQ1!V?73+D1Cl z5tRwwq_q(h(M6yUQarZhWFPVYJKkO;8t~J*Qinf;7Sy7-TAi7ukZ4H6Af!zH7C$d| zgdmF}6U;ehO>j^ikr2is6?Xwl-cdXNOcNl^kLpYlYCRxdN;BDeKb&?dDx!O_L(+N} zHb*jzSbQ%nx7ydtQ}^I;=5keLwxvPW;0aF$%zR`XP61rVZV8^Lwe&x>_e$4R zCXc3+g^Nu@%}6IngwRQcEm?{}xkPi6JaR#cKd_OqFE?h$XUd%z8_6x0=v35R4jrWZ5OHY~oFP=@2C`G!=(~GnmPZ!rFrFRH8*7$8x)oZ7}$mLLE zDy=tw5zV&E`9R8AP(@hBPjU4qHZNe{bx2DmKrj zdt;cN&|Fm-Fj3;mfFvW-lpsD1kDWY(;v}+DlH9g;IKr|CTE7f1UxJWzHb52Pj_DZw2KA zF>Q1NL{MVb6sD!p8UcdTm!`Jn4g__OI~D~K-0`pU{H>GJIq zIS31`DnnU6lc+r=RtpL1DbkDD?iaZmm55{}`4{mFfWnIKLevk$x_D}32xYPMsx)}= zQE^L7l2k*Jkpq07k!EHEbtNqcv@uley-1_w;jpx=rH$5ef^3YuHzTYF={a)bbXDD( ztD<%!v{FSy0ZZRz-K8_uHSD$rhEeKO8XL4nCX`_0QCd+%sl(=rA#y;pf!_po7r&9A zicXZg9NWfmSY+dI9IM1*9!l>$^j8%@L=5U59Ghq*Z8T^Sei^Mb6{%S!H;3_1!OnWM(n&0_3?CM{XDAYwe_HPu?=Z5 zCAu^wBPvXU=Xxc)f;1m48OrcK4>^~s^b`3B4QV2Xaecw^DrgZd@os4ZaZBuZi4Nv; zbATv6G967v@V}zH$L2)rfP+I{f`==%p%lk)iXBz}jX*RcfeU>VP8DBn7K$&6S(!ou3|h@M-c%g$UMDThe=)Y1Gt2_ycYS_-;>% zN;?N8{08odnUwYvn?(PS+Gv)iro|yWA8A;D2Q%4$=QO;shl z9i>R($!TGDGalNs-e|C4{1jAdxMZ=aosEg%dOtNmsV*IgSOIC$bH+Cs6XZm|mE{Ss zG60x?K-SzW4n&b@3#uc$m0pwN2l_O>=;tx{;A%dwBNQl^arWt0Bq>boJWj;vf{WTU 
zWJQ7NPbHT8vuMA`$x>pkf+N&}NDC2T?Bluflj&`Al@UaX`7)cS;gCD7U zuZQBSHH$;w!i4Mxt^XNOB!NZKb&v`}t~QT>Rz#o*w7p6JBB!F1>Z_`^Qy#k50$1%f zg?VNe%k*_M0F~Y`bwhyVWlb<&KCgpZY~L$tXYtzS?5wj{+ zX&O^$4W9Fnh4c|9e!F`V#2IiQ0RtNe5!-ut>UalL-*faA>N& zS6$Q*#&V=~`|-Ub86PQ#_Q}M5=@f`iJR6QDgHkdYkU=WdI*sWF#G`=NI~zVFVv_A`git@v2p^ zAh{vlf;!xSs#)eIRd6HnBV!FchFXFC|FX-@4}*X=1y-^n!!a|4gJ4#c4lqPbgPf@v zGr$h6)_LGWBXJWbVO|FuR*8f6Zj$3VuM!+{XvHC@5AWJh$THtQ)#jE3a5Ia~J4mV9{>5~YTW14YD90{}z ztM)3|Nym%hK(ty%J^dh9kT0TyfFn(kx~r<+kYq;&!<9O+VWhKm#u8In;?FZNRczqn z+Ix{TF>G`ST@d5rd@-p>$1M82v#E-pt*76ag^dJIV@{}N6}Q-=*n=W0YzBC!C2oZ2|z~~07e-(jwr&@ zmy_b~p-qi$3%x_Uq1DG6SMmX^70FiUkgQ1~w}1}s62CHy43o#tP;RI*5(6N4p&FRd*~ube z-YefL_4fcM^JHEDTPCH@59p6TrDL~@7(z~8WtYP-Wa5Ur6H_355w~hLM{47j<(JW) zB#O1~rJ+o;#F`+h;9fDux*%m?Ka(w-PVrCuxOf08lo?hwOl3lk4b?@yu#l-==I??I z5(+C>*;J@TB*hv6(omS9l>|@10{`20-`*3}5pZSYMI9qjI2L3^8sgCA>2jeh!Fpw+ z5REoMraWPoBbE`dJZQ*W>Frktr^ez|jcZdUB)nV;Q$eOnEGxcoy98sV84*4!u_Hb) zRcQf>l71@Q2DFyCCOgwZp*X6EIr&Ydl6kHOROwa`NA1dMURA1uY{CW*JgL~@V6*l5 z1I*;hfDAibVhWSTUxDVaZr;YIVbn$X$FPa{P62mlrAn5qHJ4*|rqq08u^v*QBLu03 zK#WWVktULsC$#Toy`+Z~mPQkh5@`QYDNduQvTRU6R;Yb1y&7OwtS19M=qd7P7I`(0 zpEQxGK;4d(ti4KULe3_Hi^_ZjRffPP7l2YcgWSs0YI$W@jm$O3H|lh@i`fnsA`2=t zbb%x&AyaOI4=V4Kwgx8{kjzx^kK#M6YP#?sDdkFxSiEmJDT2Hb2D)7tB;^?5A^Kd& zKN?1dOR|c5w5Y7S;HbtE0ajczPmRCAxr2D7|JeKt9M&#ho=`f6AULB#rewjju#C^p z1Z)F`4Y8=<>$PgvP)kTY?FNX0<=6%s2)o0w%S1CDn(yaStEx(qm@F@Ic4Y5P1$1Ib zqE6R#30{g`G*!M~{L?3PzwiD5L;<5zeK2#+=HU?)aPl!;2CKc%- zI?SXP;sZ0kG@$ARQ6;1iLQeO%bGR4KylF-XW*-qAjx@TneY zQm}JHKNR5#be-B&W?X&w#!^WK9alhiIA*zl2;kDkwVR5Mes7pjM@WJn4QN zsn~~B5D)_iEA5KL$!YQIx)h2T$p&m&fQO=jF2D{1U zu|5$D5K!m0^IZg@gd2qEnVZ{;XvUBgD-FUWMKV(yF3JBy>f2LJ1qa8&qM@aNw{c<|;T17>oGcyMm3C)rZTdmT)a1CYDIa zQr>E)(WhBQ8)W8da|Goqb6X-^rY&Yt1$hH-k#$IkNP`QS8&7X~L;GGGn9o^69SNj=P*M%O(_|bu{gYkCB0iKgs%}Y)n|3@X^9Lku2Is2?uOR_zlr< z&QJs=nyE$^vlk7F9H)IRFit$>zEsT3b!J{s>161-!Ez84+K0fW>QWd4CAN^@@zHk% zZ^N%RN*F^>S9%bpZFFI7J%W$}m1T698afal=&CWH^YB>~hb~xsv3)`j3cQvBI3N+< 
zVZDg;(ifvpgb=k0lJ7b{DnDuq{_jGTr|59T{UqyYRZEty+=o1qB-T13E?^iu?O^U z(jUD$ic@rl;vJp?4U-S{wf9mxkvLC)O17gZB~1D9~8+*+_QNNHIRMB=l$L#7f|55OlY1cV!P z_$-%Jx-_Q?N=dgr%}G(N)s=^^f&!#asPFt_KENQk&$ll887z{2l=p(_FwbI*K!ga% zz>btf!TC51YM@USS!H_^96%XaJWG;*@kg3m_!0S$2Tj))0R2U&3vc?y0HK+*UilZp zOB2h6nTAtLEG*_~zbPyhf=j8!c%yu!%92sp*gd^ts!FmexG{|!)z9nZmAzvdu^(>a z;1XrvnJGWvmVE0J1;MSf`<2{Q3gA#tWQBJ~1A_?&3dGoGk&BQITU2sAdAO4g9?GxN zoN2m?p>VW-BARfQT;1LKvhs3OE-K&M%N%04xeh_)(tO%p9uH+9$4F_*y>u=SZP+Ej zTcet`S(ZoJpN?_^T4Z!?t>RbSi}o9J8*z`3?&=0sGEMhTH@e~jBzV2rhX@;^tMpOC z=}5Xz*z77JrK|?g4&46+mS5J&l*C$hPl;9&XhM*)5o|YK}e3&1{lqoK?$)w~{=WvRx0(zjh z=b^6l1!n7r!%Aq*w{<*$uu-D9|D*_-Pc5jvMo=hGak?Qt^yp_0y^^9=Y~L%R^ZghX zEY?xz-~dAlTM}6+s!gV^k-2tBcoUDoXSkR>L8a_S^8=XzSffOR8S#;lV!fGD#}(pV zX-OrfjulhNH(OaU4YGWZ%f9Oqe){+k4bgCTJFy%SCX-T9&2&rEZGQ49`^7mhY8p2D zj!75#`d~Fl2jw%02~Ed!L1YC35HfwlVVRbK=~cbNcWN!t#Rc(lr-~yI=Muj#)mo0M zZT=r?j(Q*$rwX>LNjosp{#G*%A3PBXqwJ5ygIWfw&Re5>CCr~VmmHnG zatg~&s6?J3vy|(_PqlOlQ>!qv5T-^DP&yVYD8n~DqfVm2RS2`sPz7$awGGRLxmVe@ zzUT+;9>U8Bv)zKU)E6%*`n6Y~Niyy?a&&V6&CWwi1;gUq6qmxS_^glpXs;sFV23_^ zq>?CiW>w6`+u%L4`^dBMRRu=cUX|v1KV_4gPEkvP8#RPRzqAGT&oqdt6?MOmc>sB) zzWT-kBZ4Ns4HVFBjlw~lYfA7}d#~u5EFT8R$1z}FgqFGmUy4Higo(gQLyFp~>?gt$ z@=^LlSbRp!vR+2~t?@Jq3e484hoFd9LKJu%2eHstPu-O_H3k zW#X%LW!fw5N1RpDg2CjM{6AS>u;vTI*iR9P5dyzR)fFd^2Dktca0@H7_Zw<<12#at zi9dXWW)Cn;B8ADd>eRl>M2WDfF-_{khp0-aEFrlJYE<6g>p3>bjzKHxZR%m=q?jfl z1gB3mC|JGBoL22ACV$4Z z<_}fbqM6sefHWiR^SC%M)0P=%Rg5zlZMw=npqk*rTLmyej7lbCWP}0Hi&oE;K{h@c z8ok}1Or&F1@Tcb#>TEYxNA;CVqv3WgLJ|0iS}Mf zYg~l>RoChqOxvenAEpvgmr)1V7d3y$NPCsM2}z@wm}pcs<48&DI4dcL9uq0O91c-X zyp4jI1~Ji|7sGfNtVO}-VIvetziRR<8A!R8?^P)Ugb2pxMFY-qGon`Vf~h2*k;CzE z#iHbopZS9-=lr!uBs2+pdcp*FiXhaba&A>fMkmNqh(N4cnOxC8n2>O(1sK4Xf);?V z6ud#V=?wA~`l<1aRBVtTzBnVwGS7qFQKYH+rI4S2fXoRW1Y(lXhTzmbqFPA^?zxq< zYvv1R#LK|nz=*nf#F@eL(wsl^Y8n8{!rT2qKY=6QF+`6Wix?hPe1e5IJt9US9{j4B zTXB(0!4rZ-lIaXUp$4K&!4|3D;EWafjk#>^1>Z{DiH&`@7K>tUct)D#d}~Xrp}RqG zlZLlfnQbH1B`;4)kunw;AgAh6!a`kU&`J#uALYGNMno#ht+RqWKfNVBZeV4^EZ;ez 
zFfrES%Y+clFxs`~>#%JIUXYYN-MNe~jn@U^tbOtGxM0E=^O)4;gmsI^<3!29u#C&X zm)UY=*1i|k+QelgtW=1_oy0{1dbG!kL>H-^;^+z5t1=TGm9OoUcfk9JWoj`#N`mMx z0YGA)3JS=yS0PKH3Zs&S)i43-SIVANozLM>v0|3kD3$gqE#6WJXDa%MFpi(rPON^2 zQqmg(+cUDg_$UCLcqJX3p*gwejG*MEGcnJknoPY_B(ItZ`{nrHIHbwA_D~JcU!}xi zE{28|yAQaOHKgHXzK%SgXS@i=U}_4NtGt@I0-3}CgY6|x$utXoRyj|<9(WNIHpi5~ zT68#sOw>f;Us(-dfM5+tDI*U9sxWAxs6Kb(l+Y{-ri_7w+cosH4#ChF(#RRDA|Qq? zSKCmwMVG0Jzy>6K?Nz1_dRI**8I-SxD25nHic_M{jZ!SVlJHmYY)PHKF7+0?w;^NF zDLfttUTD#LhmDAoQ5K7`TxK(5%2vKhEsd1d=L;{e6#4)d7NTeqibXM)Z{OYki+DMF zkU|c=AgwxTV6t|Y1$1cNi`b2aP=QKoiUVkCy(dO2qnebJbSGQEI+09WhY=E0A#hBB z58R3|MSE<-oB2Qv4d+qfmlVh}8cq@Imro{feHc8mU;HqpPfLFeK~-aBc4QcDZV8Vl z`^v+DMM^%iDix)L>WqO|?R$~vNH;(*T>+_iXDD8rFucMoIHX$s^NEP|Dyf59OMaKO z<@gt7Yo-Cjl6g=xLGf{dZhIA#ZKAm{Hx%g!>0YRpG6qq)o$9arP^GBYp@dgVOMj&- zEOfLU7Y&Uda{2@UVYvh5-QJ7G09kR?+Re1gtXeII7O`ehG1P~$sFo&aQF~Qp+2H$3 z&-OaFdbaF?!jEOf4_vKu4lPvF&P)S_j&Dfl$JMlb2DGkzdhCMU3Qn=$rCEsSmCzPe?`y zbEO6cK+^J>dX%0Rbq5s{#D$NkK^`@u_Xue=`GzeiHR!|;6sb*tdhjx6U=Bx}VUnUg zW?lIkJ)Fs%qL{c()J@eI2~oa?#%Z_j#bTty3JZ`SFNY^X>)Qy6Euswn0o5X@pA>-!MLU?Lzf%z!7_MfWZ)GCsap9S+kS#5@oYgXKjRlC66`Tyc=fNlj2 z1Q;2Mz(^yd4nhc`z=t*G;wfPKYe6aG_#C@LR1dr66^w=MEZ7olnBFyVj==WXYew$# z`79SQTJ7&Me&41FO&X+uLn4*5(N=GRw~`5_ku8O#&@SkP_}r8*!wMqJGZUp(w2A~)f6oJqZt7z+Xi_4<$y8qeCRAVaz&T?)!J8GMkzDMm=9ylFsN(%#VEeOV1A zp4p6$nm#Nqf(O|XQq$+dAVnnYIEbJ2ULRRBe1(=wo=dANre<06t-(@o7v|Hzvi2%Q z2<&FMpB4;-oVY7MCm-qr3{p}Mc5!O$RZa(=3&-YDGP3#fC0e}XzMfhLNDjyH+pA;< z(O>1k1iNIUpRqRutqyhRKivl2yt@L8P2oT_f!i zq8pH|5_l|IN|0ot+RIY-z=?1O;G$nwPfA)nICKC;91~trm33`&*fJGmG4INOlt=`m z;I={ijM1flQ6tT8d|U~}#`n}gINKq({(Dzqwh)9*?6fxd?IlMTMYzENA;(DVaF$a7E;3POzQRPpcoBl}P}IY6;Kl z%717NX9|dsBg2t6S3v=x1$d*CkRIYvRS`JnDZ%66tTiUVebK6!v7iLbY*DJF8dp5e z4p3EiMpgjDWXu6V%N|Nra#Uu#p-gHxYS z$OFRxaAcV;YpH9CT=YFMUN4tAJtpqU_oSGU$^xbuyiV!LnWWo{s1P(@`J!mX7!@xn zH{mfoP^N9*ZmbtMq9TzT5vPlv3h_`Ys&p16*m-a~rAiV1LSy29A*G1jC0?R%*MWRi-b8=aVUj6fX*gCv1eF^^mwJ}dq( zrFca>poC^agQ+eil~4)3I#HH3?Pdt=vW7Y&3~jKVnI>fAz<8j@@ukHVT+p!O5#)32 
z6Y6Y*{4+rf&fBZ7SLuAm^Ll04R8t94O!8Gu1{D=2zp6dC zU>l4R1rG|J1HsqO5ov4-5rVUE&SHWz0<}v5w+y7H+sfM5lAb+eG?R>6RL3vNJ77wfJqaQB+5{XFkS5vdcf!)I6;KA#xk*lFNj1HZ|mq#{1Zu3 zW(jfuiSvE6e7G5BNPCcxC%)DvpgBaD1gE0V-YY{fefCcUn-t^Qm?U}!HAI#E#O#bD zaX!l~d!XQQ^=rIb7`<@ip3$;0F`JDHWCP`-2=TT=`hpQ8P7<$#NsMaF7qnyZ#2WNN z`-H^Q*c81hX0(Yp6iUR~9Ae5oQSTv)(H?aQy0DXW6cAhI3&~bCR?eagN!%0vp(<%W zoz7&8GfD!5odS&Z1w+4KCQso*9%=attB7&EigIA1ZOcL>t z)wcIi?#tog)fowajcv_@>v0ueoLWENeM+JYJ%nR@-&h? z#UI*S-Yul&y@xU-~b5vfR>r|1E1F33vx-(MlXt#Q?nWc6VXnHVj&c0qu5Ou zuAG!lQrY}1b2}WZ{9QtC(*(jVcwT$II*Z4Uu4G6%5NS%FPv1hJr7dBH!j&L7I4Nz}j3*G@2>5d*K}58vCPl zkz^?!L{bMw;$oMOe9#gw@2sJA+Wq1&QHl{#W809@$rT4#Xa+d zYL98DFltomZpv$bY35H#0SI8AqT)q~NJLVJyxM|%gZvgp26LpR!v7*Fib$q1(%214 z6taL+L)}%%EbF)J959AIf>GJ+TlULDAqPyEOVKYH6E01!q*^^n4<((>TW+t`0&raZ zm?>mrlugdC%XB;uGD@eYKvYPvBjr^30*U1Sl?X2MEs!=jR3xqhDw!UU5`=!qu^=L; zoHwR#(46z3985F@G99-Np7>G~D>-P1GR*_5l!Ym)Y#xVtQxMu}n=Ok^*&mQs6)m z$xx!7xd$M;LrE3v&K6L77Dq8KCF&Xf7E{aTLakK@Y(}E8$uW9X$v0Y>%PAfJN>PtA z?^j%eU+L?47r-kAS@#&;Jn2eBMB=|}L!zxMJCf|A>QI4HfVvtI_0nKp?V?S82khhj zaVn6W6z3IG!)lO>cv)nf%%GgFwCQFts_7+CUuYK~)fAY;mW*YJUm9hvf_sJG)Z%5G zpDZ0yMCm7F7_+Z^pB1M#u3e*Dc{ozbn8PlFaRo`sZI59>3g*Cx>LW<@wG%mnxxNsk z-ZYvjDiO?b#hI}OEE|{wDx)LXdl~O-BChgCX5DMi<23U<>Qv6t^{b?dVbSx{UWFkB zi9nisAP|M8@!Qv7h$7VkC@hnCh|9GCF7mQ>Ws{6($mC?qphmWU1rMB| z6U}(zAu_TaUmzD1eWYT93Zet|FN+(>8m5VztP<0e?ziOS4oawoP!-JD{=pA-xffg^ zEpgs?6+tfFcrrdgp)OxiBfJ&t5piJ;n(HsNC60@PP)cZ63zJ-27QgM$jI0bZTZ@GM!Dpxw_aL#yWkC# znN)YJ2?mCAP9B+38P%=STn#>YOiX4%A0|Fbu6{bPM0d9mop5XQYnO!H z@PHm^MvNa*rY$*j*dTQoo&FoA>x=b5)p1KQnS9hl1>8!rfBj&n_UNtn55cqS*Js$T zO01p$I53Jfj~p;W6-gRk;*~g|u+#1rsA&jhZj=vjAq;h9(!{FC$|Uf)P)O{^Nf*imXbPDB=xQHEdi0F;=SGOic{1R7uHD;G#XtSW(EW*eyem}JtG z{ep+3U+TJPC6h0=do(aLEr3D7 z?R!C<4 zcu1l5Qm2Nvhuik1Kk`G zA&G*teJ{FAi5&?x6d>q?DIyk4r?X*mI_U5o91p6x>{kk#nsw<}$Obeb=X(xn*n~nV zKrrV4zQn4Cl(Va;vLI;gH=^aQV~Hj)2g)_*oe+= z+@39^KBb^iJOCY$#yM^*twyGg+9e7p1R(kX6Ns4#WnO0yaJqCa!Xm;XY^=eS>=(k8 zgz|6@eMtAJj|z2+1WYB+ti$Aza(sFihy{dFjm*blNN`&_B!8oGM+~E|gdt}m-=s=F 
zQzs#sG6&Si=&M+-^2TT2j9wPn1~` z*LO{%6r{cqfD95Vw!y7vb`d2hF#r?roYX{Z3wUrE2{Z&49Y9?n;m zz+5DQ!-0I!rzQuL_24a(t8IL?uU$joM9+SD;XmKIhZeEk#>7+MrMXKEXBrat+g^nV zq2eU=mQaFJsX;4qM%vEb;Egpd`7f1cv+pLCNJ;65WrpexhzI>~G8@G^)yQ<8De$-V z0{PPop>`H+6;43xHp&EeQ>wrkgfCCQq`m5&U$*8?N=#v?nj>^&WFCvjt1!t&GQ~+V zmNaQnWkLk)y`;owS9zkw9FT}J)#ERdPkB15UPLaOX^G~rZoaaVrs&9mC@Q&pOr_>| z#~cfW1W(UkdoPrgTu8cVVl|6$s+?jNJsMEVpt=AV5iLi1l@JHAi47DqmS3$w!XW?x z!6LuyGe=E?E-N1mW}1^&M$$^jhk}JfhZx2-QdG&>?Mrz=Q*b0SWRP_I>4!mNA>AMw zIL`J_wMqfM_$a1^2PQN&UQWI(;2|hd$ot5Vz%W%<9;Nswgy@=sM3m{oS~;o>{(#g( z+MI!q0I&*oxe81$FOJ}da3JMOlxc=$>fTFrU@KkwsgAE{r)4+vvR$ zDafkm?^5w3_G_=wmB$SVUSg)kLxn)eCAS8o80AEO034BBmst5BX;D>ZLx?0ps$`5< z3YeLx*tpdCM1l5Rn0f9A#iMTE`DE#Ow3IHK8D(lt4LD;_C6h9;fHn!Cp8l|8*Q{6C zubs}E=1@(L3ht|{hPJn;qgbA_lFC=%fZ<6?sXnpj-Wqcw(zj2T(uPDjqb5`XQN{Ztg*6G22e=V_ofvqE$YBYY>!%pK2NEL7jOzF%%yuuYUNa_bXzO002U&qL|FNh z<^XAcMRc*rp=x5~y}}%0Q59am3Wq7OPw6=W6D2|%k=D_+lzYj^$Oe(fJgH2Di-O-^ zOU9S+WErfbzourR=@wGyByvV4D`|#+kvc%lLW6w5_eM3KRO{smxe1w@v|6;2Rb{ST zBn2`*ayu!V7J;gDWY;P?Z~}!QO`Ab}!oDaT4UW>F`RD>54yfHP>+B)ZZ)nr_*fcm~ zE*^FeO$00HKuO1Mdle9hn$sJYPvzh>)Ly-rGJ?~|gaHDpLq^_S1^462p>g0ra!(RK zh(<`G)SP-RrH}f+7tJltnz>Ado`9mHTQuSrU^EQ!0K?`l-LSIF_Fhg%n&fEf_47Hk z1pgStjJKgKNB=dnG_48?>K&F}# zX~vb+6Q7ZR$(EJ)R61}b+<47=krRGj!m7}s&k<}I1?)ZbA>X*!r%@x)*%onf>Er_1IGaR8ghqkZGimYovzgD9NP65ViS`boeOJ48q0sUa*F^jXG6_ z^_ZTQA|S08MtCS!=5$F@D%*p}32{J5q)?L{BzZ7Fck+7dH0~gcS*Wx2UWk{FpJIe^ zH0#kBn<1eYA?s-KDT?*UIoqr7PDUt*V!jfdF93v%%3v%;|KU;P6BNm{ztR>eYeoo0JnA69C(}=85cnT|;#Tv50XSFA^I8{^2kjvneA^th5C=qV&{0p^5>RKy1j} zLqw|b=D4Uy6Gw5rmeW>!&R@W~vC{R_~m^-3a4ARd$I z$P(;Q9Vrp1HAgW!43U>HBWHO{O_;GT9{QBAI|-tIxOYm!nUPLp#v5e_Si2f=X{Z}D4tU6+l)X!YkkVxJObY>Km@vw! 
zy^5)(To^hq5Hb>7dPLAjrBdIN6KVk=;r1$(ihS@Zd3f?yvo3^MyDF(w$G|K>mbzHW z6DF^Wj-B)fH-{CGnd?tQVzhX zAlPq2`{(b{1hlQ%jri$=>YI?mIQ@`%SC9kuCo@q9v~WxV+T-VBH4*Aa=c5SyC>Pq* zP`1sK0&Yw@zxiAAABoeMI*f?Y?uPh87!}*lqM<7-{hW3f>y?j&(3J(CDsO}8X$}#1 z+I#tvOcBwZL@gqtp_R$NK>rH+Vk~(GF61cQhWVJCLAODjMv!LuL^RL942d5lNK|Gb z^B3H!gE-&t5Qylt)cy=2&X;zC5uA^1DOg>_wEFo^+no=PsAHvnQ6DBD1qDL}K|nW7 z4)9z(HZzqAB3Y2-Fr;l&x+E`4$BwVkAYfr5mGO2pq{!~(;HuE8>It`6-EocH$xs5x ssR=+5=J)%*`0n%9&p-eCy}$qdFMj^BKmYaT75MWC{CNfbKV5|wAET=rb^X~QpHxrWn?wBG7^hT%rZAS zppCMIt6I&B*sAW$x^^~}?A@9*Z7z;%Zd1bHNo)@P+`XB_^5zG{6C%00A%&Q}B!dDOCeQYBT@sB%akmIJ}j zVeIq~?cu|w!NZY+?n0H4KqQ)3goGrrP~?&jk}g67lG>iZOPpgZjUzukF#B0^J=581 zPmW35CY*IYMqIjcRQ9Tv%}kJzG^vA&gB83obN`|L3xT( zZG+mDiJ;DUz)L=Il!BU5Pe)J);>kx8UO|e(jvb20mwboCzAR^5=G-@{ev>XCiy3;? zB0$77VU#$Vo)t>)j$^&Xwrxd@XWSARDt~4*6NHw+3~gyXm$A%Bqo5cK8TD|q@naok zkq?QffaR`2RZ34(f-)({$`1Hu^|?onW-#U`yCJP1U{N2TnvG)?cRfNOzH!zM1Tq%zN{D^-RT>lTqv7di`={TjUsc3_CaBXCZBw5 zr?7Sfu+En`kKoyp`C{&D6p{_T$cGAqOz^kG>{VtA@I?Zl?F*o%C@9)0h;-GVrmOPZ z56Ol`eRf`n&OtO~Z+RnEONvPGr6nASd3HM*w?6ir6sL4g6S~J~f-CKBN1$Y8@!=R( zLT0j=Wa8~>Jg9GTN$Fp1Fv9|HzMO(=DPVb$v~jrg#I6&4%26NtUmc$ljEymG7#ru#6>WsJ(b(Cg=hkvPqKT|w^$?CC?GJ6P6x zbMI2E!>Qe>8yEHHeBXgj%JmpiH;Qcny+OT&<#6K@GnL3l#7Lr#Ju1dk6ZI$(5`GVz zYd4k}Wdke+>^h)#t&81N%n5YM5bb%52B_(!ZKC0z(%qco8$_v?k|LDY9t~Rpl1`7? 
zd@Q~|Ow%qRz8wRvffAi(>DQ?-29F=uVzxhE+OqehZ#4|kcwDB6Y@y+(50ZJ&z1T?g ztWzUvLRup;Z%zx+3-Djv>X(h0`Rg8`~vqZU3!h2_XBKc`OJnzVgA(X+SYlZuV4JG`yq@qAF4+sBwEoLaL#K6v!4^VC}Y-yPE9 z4_qv=stQ*8Wy6$lEtkvKOMb3(IHAqRjvha|N6%#TIe#_BC5FgEb*H_$*;3nYbHu}n z-!F^aq?`<8`0icSJ1yB=WjrF=gyI^$#93iS-$HH-t@wcO zsRpJ@{HS}*E!$S+@UM*zJPnOykq>v!6uVK3&*V*rJYx8~A!2H_Aoz%2n8cU~MA$MZ zFt2Nm(-nBKT&JrKmEcDb>r{Z&sOWl|#%32-A2kn4lnn`zBDQjoC{dC%Mw;5+~w> zTLyCmU#8zf?;5i{n@O)mubB%AUdm`d;AcM1cfeK%9C2oFE<$$(6r%o587rXAh9i~e z*R{|Y(2&&9;Av1?g>e{V11l4~^qsCZaFFcdQ*}pWGq+rEHF)!5oAkyR-f=?!!@cVc z^~ODHX|0Fm42gfdx7h-XD8=XEh)|&p4BS|6#_7)rp?&HrmC5I9)!tN{3scOoE`++i z^;b0dL4E|8nP4>P1#XJ*aeT46+v3eEnD8AmdYP07rKfEDmT7aw9$5%;Wc$#;_a>J1 zxUQ0qU0dIGY|pi|M#F@vC(%HEPdJ%}Dg=o{`rmlrUP%eenNl8rY>P0ASmp+zc=xm; zX72NGp!jV&MWJ)eXf(%F?__)@v+ioIKy)pDLNJ7$K1Y2%Hm`YJ@i0cbRfe~OOL!^6 zAkbRY?(Jtf>~+z_HKd5ODyw0TGl|azg3b0viXDkz5uH?{vZTIU#rIU4CfcL425V#m zN#KCgq8J&JU##W7oRjAKzHh}ttbPkdv-N_$C&4`!sc+T2MxI~OZOJh9-x|WsKV;E# z35m@luKA?8tc?P@-xj6Qd3rf@4AKZ))76)*iut;8;4DTW(j z4F-c+*M4gCPPt(hMM9kE{hr5Hf1^o87MP+Ml@_n=o&A2X-Xj(XydUBPm?<)&4N^Jo zLoeL$^t^2Et7LQYON2KT4NkH^Pp-;37G=`tyK@TcGrfBU{lL^rtno;Bt z((r)Co4|EyNpbDV1MLNyS8(#-7#3mr?M`Zt=|&DY!l^OzpRnq$c=CZ2C{rsozWV`1Ko6)ti(guU911+d!3 zas%Do5-D}VLrUzhYa?qx+!mLrD%NMz8!CWx_p2t=?!9}dn)M^L-tjio0mt`#z;p8a z`}@H)tXv<#P+)!a-HjXP+6M|)lkomgYw=IvO^JKFh->J#rv0v)6y~KS+^XD-lzk5R zgw}ny;_9F)cXE4ueN`;l*xrekQVba8sQWB^wV{kk$1M$>WknE0eQfKM^;nBV3xV5|4K* z3j`fuE;d>@NkavW?g_s?bXU-E*eXF=qEgIiDTgywH#ioGW(Xf61<&2SmYBQoj~JTK zrqb7?lCFm(#cC2|q&5$k2^(HDXD==JJdtdk8NW>`N5uxlB^E1PIW%%aofx%md1*Dj zFm#IOges~Lj~@4#8p&c?jy`CshPB}Jdm;PpvQr!2t6N%uhHdwjO*PXr5&I`Mps57d zAZj-4RW_B*7FZhd#3tUKY}aZ#OvI8|E1n=^_S@24^90|UH<#CD ze3}~{Pg8?)Ad(@0X2SUb6d*Fnb=rEXHmHX*&EpDPLS|tApMtFNo4XsbvKSPv7$*ma zYb1(*DbpA)|?!{(2*z1%XmL_wGQkBRVZ_a0F2j$msic+D?hsd z2MJhNlN4d$dKO9s^$YtYbIs0xeI}PCmDpM1r zdskX`fYu*USW_ksk#{iG^1^eRPZQvu2A7T1JU@8TVl{J}H;V%bq|`QCR`apmHF5`f zSsfX`OwTNz1FBS=_O$u(XVPr-f!;iVZDd@8)sqy(Y&RW6qwkgbY+)ug7;RT`BibAh 
zu9!p|orL;JZeLMJ7spR*IXYM^0qZN6&q*zVDIlV`E@+-YCXc>e!B)=AD zCLky!G$YoSD*R7f*1T-Kk)?hkDDERv`7=ld4;qL)(%%{CVkOv)Bpj4?rpT$7w|mYg z-WdTJYx?=2ldf!`F0U_4$!qa05EOWv@-v4`6r^KC9AO`n(dk-VU!8m1g=4&F#Vr6zY{vPO-Y0x*|q_4pLA-T*%0%@a_^$y72$|xA*BA zCzRjnwDDD{*cN=5*We(VZscD>Bzqp5;IBOfL5*uK*ua6GgPa~Hy7#bNR>&M2Rp)Kb zH7AfLYu#O@NS)baAi9bVeS*9~&8pB0c!GIxdu}uoBp%S_A^W3t+d8{y6HwjET37D| zO^NjxQW~o)!@z<&$Er^D$v3gK)}tt>X3VxB8MenP;Usa`eoGLUO*8TZZ@ z?|q`ct70!q9M#$qt=!Qk+@6#)-1CPXWgjWklKA;UMQjVR%Y0*T@Ky}FlTxHVRTtJ& zz%QM8#J9j?XcB{tA%WF;(214MCIpFpJ1zciiMT`QpPH}e&dyYVP9P9!SW8Zr$if8Q zHisQWG0IT9t^CH+pZH`W8TYeBlcGD7Tf zWm?g;F4@QHyZ3i%Eqv;gOHbHfeJbLIQmO12IEpxXx{Rtr(>r*Kw+iXCYch|uDO6bR zs@ph+Ec}keBp24~&DRmU9Bh;#2eM2*9aWk~GD2XQ=~0?7fqDDD$;3H-Ynrh@N4ai% zw!YL_szMa_QodERGD~Scd{>2-h@*EN`_vZi3 z&`R_6A(^8u%ySdbVV@QclXl+pr;^y|ip|*>SC(<@8n_bH@mN0^=&lEh8hKW}B*%iX zx9+#c_-n>8q_;030__^mRv>{_03h=l!tncpfz;+QJAhTIX=Y>Dk*1xa>E?0=uH>PN z8j}Of9(n*rfL2+u-HkmTJ8d&5Zk^YSXJ_4d$qNpnsa5ovNspyeUs1|L$JwZ)o83y~ z>_wn?EPNxn z)n5YXAz{WCr*ovNt{G$LE=~rG*w1T@m7vgWlGS5f1pA~L*({eds@_UKXY@m5OZ%?t z%p5uEtNIyy3q@@mTfmOf30!i4lv6cs1zT*@H%j%}6HQUp16uUjw6PSkgKAn3a~+ zH^;q*y8tAvIbGQ~oe`_Uj*wQxcI_+{kQ>gvSmxGRhZ*b77tIpS%Dk9?GrAJr7=#bKB+?S& zq{nVxZ0`4K9TTs|D)_S;C?0C``$Z**%$@FcfO<_A6UlWp8%vRs8??$w^EbljxXI`# zJJK)^?~dFrumtJ3ZUoxZZlz(=F;Wj*;A`{bo#8W%pRTkPG}Cw@VL@tp0a@41#6cFt z%6htM_g3&4))m`gT-`dMeyS6(90Gd+u+5fNU1 z%UW{1&SN1?t$5Ogn1avaH14ipYS#J%8YAg7N$4*!pfWt-r$DmFf{N)obyjM|v&V+< z-F6=RY;2(+gsphKw;%`{9u*fQX z_9Wplt(o4K3(;2eq&4}x^6rW+RDAzNo9vj(f!BjR4=T&t7otc7q>$j2pFDnDK4A8I z%>dA-eAfP30c-TmNRO;|pw5~b6h}wS`*+rFynu@7A|{&_6l2`-Nz;yzIQ~GGm@7^Y z*BD&vH&Nki3LMwEY3{2vb6XBbv8-p)#V#cD1_x^=3grk5?mMgHRfMs$fq{K?K~WGy zf<5?AU)k-v)xYT!jdHDURJ@KM7w4)c}V+(;(?**r!5W zRL?xpq3Io)_r}-i%gsg>B!c|T4@s|4)ZEi-37eP?8465$?xOD*7frDIs)ELIvd#vf zvGMDKt8(LS1*YIPiNxdPn#p8Db(tGn$jO7LzK3RyUsF>dw1-Hsm_X39bO;7*mr7GI8*Y^7R(Xn%k5TB zJSQ$SSlaj6MU4X%MKX}AqP|{4-X<`ya&R^6)CVE>;k;x&1{k~JwR$! 
zh6X<`(Jn07a4=0=yB!3eSfnfiVsSl|vr=hzx5(k)`M$50n&At`lG~GM=8$0WkBwWh zWREj~?B4c({bvO0v#v`&RhUZMoR~nC;ifZzis;==<+lz`Xyl5uMC(tSV;4hzJF(q! zytwFHi-AEamW`jOFVeWNdSW-HrvZXyuEND6Z`+DsQ6VMp67$zUwa0 zI10-^z&CL~%G)h+pS2+X+HN%PlT88{ea+V%xlp5&Du6=i82!v{-X{!}K+Tjl;^p}F z(uDxsO|u^&zO-_jmPNrv^=Py@K#P2P99~+8#ym=YvxXQJtp?(&Sfie|&0x!^^?g56 zt>V6JQ!morakE7%`0aYvbeTQ%S*J7fq+MDI#x#zZgWTICiEQ?j!#Q`SPiYaJ#xZ>< z?deq>8%rcsK54;mm+59$w;<20UE<~k9$1T|ypXo4S%CJ6$XrRCB`0yl)s( zIKJn$Z7|}B3GD+lVgd8b9&$Sy-M+t$u=l*+ zZc;`Tc=EZG4y(x@4+AhWxp{^c@_g+e&imj`f4lUqw(^?mF&^}?GUBZ4W>koPqV zugAdhDPE6(EU(c!q9eJ!-drYydZTa8o7h+^Q(59}hel5ho;po)0urf>OvpDGZZqdq zTZHSK$pW!nh-4I;`gtBQQSFmO>P)V}!CxN`E4D{qf82r8QB7YR(Ct&1+GEO~#jJI#;!*wgD@ z5QV$BKPn$4yent7>9#mNv|XtXS}BZyq6ZQN-FibtOl8@;YgnjHx=UULGp(;3;I1n^ z^He+Bu3xU~4rkQ+*vwWCRGFL}1>C1EfbvzY25M<^Y%-EQoPnh}{O7QYNfaFyL+W0N zeDh)9{42IKK9zPR8Kdhpiei3HtnSbmNQDw=a78HB6NPi|fl-Y5CohUMk;sQy$Y$n2fiPqWGOLeyS z5ttv=G4!rfdbR7>2gW7d7iiEUVNtHcKJzBIl>98l)`-bOr#s}(pZM6P-;xmMKY8yQYsWLwk3AF zO_S>nJ#|$4?SP`Fk?2%STJygqFTp7Z2vAvPCh&rMp5l#q7d8sE-PBArO8ocTvibLv3Eb(Sz8Fo5Urg|rsJT}tJYfx;tLIWIl>qKT zGnRB6sS0RH*f2u%mOtDBm##)9Y(EAHJEgN1jFAnO+9B0T<1d0Ng}UPz=5|efjMDF3)^TU zm(y~Q8x_e*KnD-c@*0F=ia2=5A)NauY&SAXZD0*yV zEitM~wi?_PdzsvDA?t~$lPZ_2b(MH>@mb#ch?8Xl%WRM__IbB%`7w-X4cM3lG+nE9 zioBHC&9^M7hw8rrb)xy=egP)xj#X(mJ%R+ANto|tx0aA?ZDOy~au9wswd|#)JDp{q z_b^)KDi_vF6f5CVDATpzUXF*HIHueeH7AJjiuYpiPSVR_>K7dsE6y)~#8f6cfi->E z>V}7+J)Nl^MROFmy4O3mvvjWYnWv$Zf_g=W3k>+vo$|UA1Z? 
zea(=wQ0~RLdKr8win2Gva4{7DwRZ7s+!wri6V7i*_V>})3IMDq>mf0$(s9ixjK0?oVY%IV&N zy*GymYbq^hKx`2i$fCzQorT3F&mw!GD%7KHYHW*hwMx)BONLugGiy_#fO_k&d9F=5 z(W33{@NHW7(Zt5at=A!a^6y`Uwv|z4TD!jDwe|yuWOX53JedIMgAY>$CJ0p<9km$n1?4V`86(95rVBGHpuNKq-m+PnunJ+A8{x`IHlPcriEDrGm`A3pvuY*_VTP4d%wLf%dfWy4j;a1AtKgT!k#F$r9x0Cv2a& zA|!465aouFG7b#%x!ud3orpE`v9L+8nb}OgqKuPjF&)=QP_D{`>G#JB2+<*E1oY_v zK&wUx+}}`mzEs9+700xojb(lmToGiPEqv!9LSgI3mN%mpc#;iJG`3@LzACU>C?NuL ziwhM6i#$DVZZT2Qtord@eA9PoUU@+H9Fu7&c0T%-vZG;s!fl8KLewR^8U@N5>wMPZm3E?5z~aR<``Y$ZZ+N8r zGg#n+aaOjmeyP$7nNsE7o^kRxRYr>*b0n@xJd=r_En-DgsK(B+PTN@{VIIauUYP7KOS$Tc0;0!POTBx9 zk3|CG?0+2&wsO>(eI8Ncw8D6>fSqu0{(7dSwbjI*rW{7m=_#s!$>nB0l^wS^-l96h zvsUzo{ibUg6c406R2DvT%Kf%y=R22ph^7pVIiW#gZ&TN4FLaSdZ=aMZ2Mmcv@m^-@ zCOcL}5*{R6hD73JlH6ew>(=v=%N9WInl}+Bak)fTDzKL5KFs?!p9&J!d)h^k9dZwx=Wzfhtz4x7Ze7v8aKzw z+fit^)p=zoCEr|k7ljK5v#5afM0#Dr=0hpBpO5NT(|&0ey}V1gk*b~KHqh}+#LAz2 z;3h>5VlNvL(C)3?H#L2+kAhWdQ!{1b-(GXJ2LU9f-84DrEkvOcjrsTz3B?EvE#zj$ zpS5Ux@Y322)4v>8)KocIHV^1zVMocySHF|WnlHeW2lAlitwLRW)F9#0wTAFCPoU$i zb-Q;32ex7qK)SD?LvY2tG6yJe1_@Cp@BYWM?8qC^wxvCS_5sS0@W94TNWc zc;U&tluOVJ|fy(;qWy1va$+)#j|`cchx<;to-HNxIBE zFIUHQq-f|}jklnu8OB6Gh%O=6M6HYjiA`!T0Mh!2^ibqbHdjN1flhhjJ-txf65Z#< z)d-_ccnWeD2p?Io6T!_C=NSnWXiw4J)|1L)pb14KY1)Q!d|IPj=OWY^FG17I(+^8D zo8ts2ltE6Bjp@l(teHxtod%wj0`lFU_jiM&V4}Vb{Y~)=cDPkUvl*jOvW%Ar1MO~- zjTZL`V2`eV`nqSlIZDmX0OjoUA{@0ZvE^sKUIH7LQ3*Z1A3C<3O>leg);{NZ7u6dq z3p+~5Z7`z=h~-~*((7^0POk)&wxDb=xzW;?EA^O#U1QgKk4km+kwQEg=f&6aN5b1N z9u-!OgYv z2JoJzi`Dkyf`tw?bvB~8=ZMU+K3U7Ys*}HYi|6zf&@PdfbP73DjmiXK)dU1ZpAo^1 zif?$rUFi^Gr!{CVglRgEwW4e>+0~w|c^VO3j{@#=osFFWs0g zMCf|&7tOU$&|eFeyB$MqzL$HSv|fu)XK_R!tS1_ItWmT1h<#VjWdrnT8 zi}Q1;c}D8~9(q>d>d8GIq4B@iZw!YfG2&ITa`rsE8LFg4t2mWV{p-|cr_y!!!d$Xl ztWZag*Ba#HxR4~(Xg%-B2W1xP&Q|Q-AFYsl z?B36BRESLniO8wuvL;qb1Uhg99lP0DL+st}wjWkx9QP(FN}*yd7)H;{Sz6vg6S7Jo zGl%a9+TQwkvKvR6lK}NdMJBCwl0!bz2PVA14n(NYWNr@NQ%ec^Y4U0;aRmgJl9Wi0 z1yzQXv6WPGj+tkW$3#I*dXOwrl*`5v#KSCv&Al{27)p30M)m{L$aQ^H<+QZO3YKMZ 
zA+%MZZ<1*%&<|DVj$fovJ+ePCz?6PWn7>-JHWDriuY5ZOMTYB&Aj5CI#lI^pN=k|( zA>@`jXi59l`^c!RhJ3$Rm@>l_j6meNblx=kdY^^8x|>F;0~?7C_-_5S%&IAuNRB|) zSNU@GDH2oNwyS~U;9h(!@j;l?X<-{FP^DtN%9u`Ngtvsim$gp}=t5M1;YH{eE{Uic zz`X1Auh~pGB@DdfU9WgK)KL-=k8twfGD)_YF;+(!;1`+>1L{jRsTOCAdC|P{+P7`f z{D=_>UMFheW&1X`mL4QTy+sC*OHF58+b!AXlSE$+*tiL-g%Bty3Kdm(ZlshaRjxX# zdW2pDjOY(bd6IPIr`aIf+-o<@2;4D$8YiJt&+5bOj)HVA#tEl#XUtdFP8~U`M0aAh zrNvV7>}M92Dzt3hahfAG_A~iwk24JFx5(P}S>4aGIm|uc zTTml7Z<%&XkdZaOfp z-q_-GWz8)LI>FA!&gGN@Tv&s6H?X@gSxFCR=#|cQ4~5*A&F1J<;@bAqDAqQ_P^-Au zJEhe<22gyu4y|NOw|vBX#^qypc-99mI;z=qijK|iXA)zeD!XfaIn_@4K-9E%Ii_>c z)~c?{w`f}0h%+>Wd46tR75aiT2w?u|TJkKj6{KOcn${dFNgX%M=wGoO_7xzTz=;ua4H z7G2J9pW>||nk`wIhjIam-!)0ZUwgLFF$kfz+3fs0@fa23If*Mj$sHCgP)Ne%D5RtX zYxdxzxpWp~=1iMPyFWF<_5FT`=`Qnz~f!(V+TfR2vH`w?J+q<+>fJ z>>h0HED=r@bCJpNB4gq~`FUwxBeORaD86mNq1y3~d%*n8dfPU8gYm4d=gqmVA#>-t zIcRj$iD_$bP}A^wvzgU96Y2nq?9hy}uqpXc5yjP0d~7>>aA~t0h>L4&t$y~c)p(2r zY~DG5KWgYk;fp=x=T0fk;71>$64~h90b1@Tp^9|hM77*IuiiQPar&bRw zA8z9`glQ(RDJsO@+VW91ZnLc|0J=6`OOCOr%>*cAgRvs4?}c3U5eu4UGkk|WCS&e9 zFR~E0t=aCZu}eLh3ZZTeXG2Q8V!6@PfW4AY$XZyYE5Oc`y%=aYd>r*}O|Vz8t$F_h z4@w`gy5&4|sr3<;cbHg*bs3-~4SC@~M@&@Bu0<7>=1C%eEbRbo5ML8tTeTjmTBQfoC7+j5X;A*h@Y2F+@%~cz zk;~q~X4&v5BAj}Ow0q~7Wx6w~Q3ZwYVzRfT8QP%zbYDE>DP{6ttOSJtVqxTm*~?Mz z&zW;jz3tW|r}`DSoiI8dl!0vn(tC3BUqS4zt)}>xs8SdpgS^ylI%y?)yuNC)`R*NL zJUM7C%ubpy_-QP?ZREYQ+{cFK$DY{;*={bN^7o};d{U8rD*bMJpycRV-Z(pgl1}J0>r_IIiz};$B zx#rg2X2(?<&w!R-_okX$rV-t9f%0(aSiDrcGEk3!&HLXoH&}XoW6cfWQV2MGrplGP zoOMv1hheq%A7)eEKdv1)~$S;^jn;1 z87(MxYk}fN;>g`t~(;tv_Sfk~T!NPJCaqJy=oMCe4 zhnu2)$Z-39@#8Z3i9GYE5-dF_i%tl$I#k%if2u!74f-I#mUWuEg=4z!C&=q$QJb9m zNrboz82~PFBn~9*2jbeZBvGp#?zs_OS<> zMpi?M=h9-Lq_FaC>>VM$LWwn8jY92{fVX|9^cNYtm?`0StWj?kJ~=&iVk|9}uUi|? 
zdxWMuLW|DkBoZTBJ^={u#gf9#K`Kq_ker>)nZ>h%qDzOQ=+1s+53@DGltW&;4m+C* zp_f*wTJi`PzVRoB9|3!31CLhdIH^vnk;5*I?M*MEr(xn3FjZdwj~u*7417xVL=w8% zLIQJ{fLFmari-0leD+FM^UUcWA$)=M>ZmO!eE3J*z*4B;`b-ysxkk!1peRqHGl8K8 z?)4}lPia(JEyAvum&k-s%fKG$W#DC{1ORF&b^dbZ5aTBM2A_fC0aplfZTVm4;?zqTrHVa7d#RjJDg#u@|#uJ_nT=TEE9dNzA?%x5n+qKm!cx_e@ z*4J>@l&Tb;3ob;Y(%DKt!9JqrlU)zYbH9(^88EA^p1&^Zm!GWd>?8V=<+DY$!$x;Y zeb9s^zeVyizJ`RcX#T2=kKqDP!&8EN{0ZG&GG*fvV+BQM!z#0r!(%nul?z5Co`d_H%84k6cktpD*A-5PUS=XWm!ej7t z=r|?}qRn!~I_aGJ#l@{#%&`aUg#hKG*oM-8>Uh;fcoFYbbb4ea4s?JnZr=}~8x6>Z zv$#Ac8W^^uUWxgPykMZ6*v_vCOiWvB&`NhIc@ge1aK%H4XIhui!XJJ0>*_m_sxYQ} zW>xEB3Y?Ya%i4Z4qBcm4nog%s*cn8xut*otZ`T7WbJu%E+3-8rG+91WRJvX9WRO~f z2bZkixt)m81M&lrQD!sV=U#)9At`e-!+IHCCvz#|SHL#n5zdH|6V&p!&eze!DOa(4 zoSFyS-#+Nclge|DjmXt*6iV2LbbanJ=AN9b7qq#X#BQn{ONwpw>SnEQK(>>eScit* zDdrsv$3Z!1x>?RiN=crKgefw2iVdkf2DN2dFM8{{tB9TSwC}lVz0Z%lfewPENRIZP z^TG9FUL<)~E!63S^+Kg+)@Ap?D!*8{_`*$#OOxGf)##+QkqSGoPiLv7*Vs9qTh6N= zvO&D%fUgu#*ad&UTeZm<}Y+x~+fP`=cnuLpe^SaWpv(tK*Ey9Y?Lgc<~diB%ApD_urZAWvN z0zDA&%%GJD`W49P&6uSTFNTjm+Qy=fV_YW1`(g(*?|FWNPo@F>;Wrpxy_$Z@dE{-i1-HFkh(JDQS)(ho9vE zaT33hH`hCdtpXv4LvPeu&K4GsxF=aRvA*GOi&BAh^mGf9KaqpGem%i;kINN$Fm7at z#E?1fOJK7cjyG`gJ1H92Jbf${FP|iK?Bb|}>wP^Wwmeq*YW^)(>mYJCjT&hMNK$JM zICeM4-zR6;j%~0;%Z-)o1`NYE`%1|%7|l4Y#KD}P+!?%g8%^UG8{)Q4N$yiMXNnwm zv4k_Y@KpV_ZE*(P8LWbRtY(UYPFaB*iB92S!U6B9dta3)Q*|11%QPK-NTB7v!+YHM zE*C&Rzg4cVIpukN`1pAo!`qS><3l2>Wkq(-dZo8U@om}NUXQVi9O>vg??T%}4s!Q} zi9--6UMGIe9%ZEub-y_R@mDI4l#1E7#L0SiNMh2DCOoWsF<<5Uf&V|}AAZsu9el8^ zkq_Z`4qjgu3>T%(u!SOmJs-iZJ>R4IJ1F9u#{8Q7gly~)W%4Ps zYpNxWOc@)H?cXtpS<@d?eTI-8y!&IBr2#$&_c$(;Ji52bu_>#*Ig-y7^+ji6ihk&E zUNsYqrv97sM;XS~drPYqLZfX(3BG_|WdiJ1mT@I)5Si`i1UnG~g0BJJAUO#=yw=k! 
zD20i15jwtbTCb*j z?gYL81JwC$)Yj76EpHJU#M(-c48mU_?7Hkpqf8c*$VeH%Z&Sq)<@06v#(c(OAo7RF z&)19eT@syV9)s~nha%%Z^-F_cd|Nf@q)VFn(A&z+ZV_;gb(S7*a1}QpWZ~Fqxc)G0 z#}E&3gk@gKlLa7rP2yybs4hfi$y(7@fo7ezr0o#?n`0k=lour_cHtt=vK+gF`RrOs z4R2NMtV`Kb`<8&9pvSg+^0E{p-51Le_+ZcYRqT|qu(Pt7wnmmJ;}^Q+_9?@OK)x(wb-aZEu|q;0_VCCu)~ zrmlC;WX_P*du48S(vYqPA=vW;I0hPSRWm?4>%hL4!o_%5Cx%Jr9PSA%r+VzA3Tz3O zr)@eJxvt$j?g7`3Im-<4nIjMvBb;tCA_^|-(%)0tvyZ}hf=* zwKE*gH18J4QQJE#1qQtT6t|J+?ySB`0f#_J!d%HqM&FTu!U^#FjR$8Kh*NaXY)*bx+pitKfu$cZ=f>K5H$3ZVYu1c(uKlnG0FL z-7fRi4(N3t-dLdpN}hIbFL*P&Rr6ZkbHzr~9ShE|VefIs4F(rdFRrp&Vum|1oWtK! zi{m2asTWv8t{N4er9e;5l5 zDLO9g<z+(`7F^>TPUv_XplC2DY>w>o6PGDP5>UFE{cg!pl_apb)YYy z83VI|v6+ocf+V@f6vWJ`TDNty-y{>>(x||zSU*N{6U3>nLFJQ9DBd}sUK`kIDee*~ z&b*P>&IZ6rcEycW$8FR_HQD)p^5l1y?*+dP;%--PHrJ*??egX}%n<=;Rfn$lnZ*ZKf+ZGZ6`u{`i_gdZQ zlJw8y?z}>ccBkuz^aVM6X*UT4>={g*Mu4vjJxbmld(=WRJyqUb$a>9DbA0UV{&ZfZ z4{xnGe!mtwO|Y6)a1XM1$BU3YaLQB#-gJiniW7D0biBp_VAr*%e^^~5Z-v$vza$-|cA%F!qo`+>{+LLtRD{p5mw8g+9s?^JHJ1$8V%Aeb6HB zTrtWcb!2W0mzy0>WNlYazY`s-n=X@e0_E~_lv=1*rGA>!RtF)t1_IUBa!JE)q-x(q z?CzOIH>jeE#mP&O4&svWL#Vy(6!d8|gg}>(eSMI#Di0r$4Lz;p8%6NPYISx94COI1 zx5mwaOVKtMIn1<{WunP87b;uACE@b5EpB-`dvDY?R0C`krD>?9LI}3CnV=tGRWzb|?b~rL9yrm)JS=jubhL_aOr{l& z7V=-g)EpsQljD5M8nc+m*gCyG_ zDz}?!dsQRj>__?+)OqsH-&W&->Y#aw{`YVJr z)iV+Gtaf~Yv}!$JIkI&4?~sGFq4<$}KmhuR4d_)_D`-it&uglCwTl5-u>L>;r0}FCIsYFTAZ< z(}GoK*R$(7b&P?eQ-rRYp1Dx0jriG#KxjfMVrt5GQiO_9g8u8QAku#k57GUkXNU&*cM zQu1XqG}Gi7=h?`(Z3(^Q_dd~hm{RTql=t)NB}*P$Nq(0;O!-I&u7VB0%ex=So?em0 zgS3UJ$;?O+gh7_?L~E+(dW~}O%4EFe$vEvqig&a4Vw7Uy{7AoF%L~*!u`^SzI~<)5 zCd%d%Bz7Fi%8k~5VHwY9^=YH5K$%O`E_2ilHua{Dtfl{d@(b?SAJlK9V@r%#=DA0& zdiJ4KHAqO_WPX!>+GB*SF`8W6xtDYk_9C~G;{J)`?f&k%gnou8>$JCSv&*tUl?tJn z8El;V>w({6G;(>9=@)yk5e$xNFGdfm7U-tp4U1VPc03Q7qqc3X%02J4=)Cqbk+1QN z%jD&Usx|=er832AFdZ&43l=rT7wKyTUe&fSGGp#a;!0x-Md@hP4+=Z-jSUN;n)tr? 
zR<9_kgT~LU-D9DaIq)M1MvIGLkV@Z$t}6C3$Zw%d)#XYtXEj2wBKw@@v15f|Sfoc0 z4NU04Id$_Gbt?#{Ts6kKRdmht$GR8_qoq5>$3Eu|(vYoxNmuK&j=vD5s-UHKc=4c) zAt0g-{GvN9S(fX#SWj?XCsr=`!}8EEkwexh*+vn2RYhrhyb!~n;&`||JCOGshYaH^ zE8oy~tZe?AvCZ<;L=E!!n&F{|H8}}guahM&!eo`{0XRk|NVqF^O8bhIXLC?I`EKU; z5doY{mGv)3=b3te#?V(fY*@$b@&{|WlfF-8!ZOrx;45VgR6RYPVWc|rdDIBacrslQ zn1-<2r|&DEoT%5Z)3`0gjH5P$Dv}U&DY$-0UyF`B+_pQ@Z!I#Qi>FAcjScn^tkT)^6$0rkdw+kxluFAuE7g@eMhWw6!B3o>{kgZh&Q} z@qLEJ+fKIIRG~o&1r`mBa{N`wL-Sj`NdcnuNJH)`36*m{XP7 zZ=hKkr%QGD8b&~y(D&5QTza<;icGUD?$stA8iKkz(5?o*rbKshy@#|~H?uQl?$VtB z>cFW76CHA8_qjw$T$i+oK^?+w8KK51-&7MhwHet(m+(Hqs`j_kP2|ItIV26zLiy>4 zJni)?*>@e zwuhOUcV`YL@K7s*qlIX|w(lAZ#jO|XtUCCGxoObxCUuhM4aZk|iD^v2W|&uT7DJ?f zp4kPewHNw=fymWN`Stv-TZ+sygL`?+beeV;Ji4y4bA@vHfxKtBfcMM7*%a1*eU#at zphegbkwm_~7hHEtQjxd|vtywXhS5-)mV)Lplb*01yLG~^8I2W2g&xlbQWiu|KVyY$U zg<1{-o~H4rS}o{HhcU?)*r^cf4vB3K;lX_Hgj8)2*@@u%g%kQ5H;dH28uP;+?8CJN zIs^ZLSL!?|aSXy!=8(^gGwcx?dog9v*+L~npPlD$4d1bvK9lucjrK;mM0Fgk&^4X) z)shV;={lzcFpb|k%YS>d=aX!R%Mw!GDkn29Z3kmMt@&Q@-)oO!k**PK!C*p3AfIynIA2b*A#nNlwPS5ECa zlQ#z6S{Dx=&YkNqvDoi4s3BApg0Zj5bF*l!=NjuxgKtm}K5eshhwY}*ynYK=B6x)E z%iZaBQo$6bXh6#PzO`a)!pKGGDHd099HS?S#b;E76$i8o^um)~V8L{?nU}j8vy!ZpA zG%znsL!_p-w^gF8)_vsC`llD_XGNCr z&>k${SRJaMy`Lll2Nw{pc^V-o!1`6@leT&i(L0gUR892a2R%!2W0fcyFl$>K zjAh2W=AoTjkmmLwld48;Z;^IOFY|0_{e{O{bmk^#-ZekZgb|#J8IPH-wbSA)uD>p7 z3g_MzsGz+1)_#HglV8W4&}{)%6=b< z)^w(4ud?RbWdayc$T%U{Fot?Cn}hJoQ=ksF=B^KK;%q^{r6<00_=YYgu20(5h4ZyK zsdP4&&Gcg3B4AUqpB+*Sd6$YS4XFX?zK?V}?u7a74R&{5*(3g=U?=zMoHG25bq9*A zD5vR$-H(_r?Ypx5H)A4f4z+XO4vqAMw#}GO_KsEVZPeWemMoL#~IqK0f>2!Ny!aS0&@($whKZ%+La>S|2lVglM$Xf;$-*(7Uo+~Iqni$pfI2;eR~tDdky zk?jU1;rWZvTQ)ATwbCRK&zI)q6_QG6nGegj2E;_JJCMI=1Jz&y zse<-}z7&#rGrX15R!4?v+S{1Sx1*i)bmd;V$O?e`KEZ$Q>lftwOI`?(lA;b@%=yPJ zkWOd_C-(99;f#^0Os5hir;F*H7sF}rN*tkiR4O1NvSqYjkYyRT@!h&xaRB}m=J~tw zt=eS+wN1O}=u&TMw}vI}ONd^uJWqhn>R)3xa9X)aKpzmaSxr|*UTlYrPX!N=sj`W0 zn0r+U5yXIoqb#)NH+@_-#FjitzgGGjz1!F>PZ1wlc)TESqauDy_kgtC%HFMsTmj+t 
zt60fndyPjd%f>WHp?O9QVGQ-G0%aoKU>sEZd~M*$zhIE2DIKcZp_Iv{8X9y5$r!1(r*D}DNs02d7?@lbC z&MB@*-cz-j8*{5uuXX9sJuE9otOr1x>u4dCwTf$8fhcZ74_1JmcDk^RBfXmIMWPp{MmyZ#PztOrIaxm!Y{ja zuG6(OEH&g2Q=TBEt9M(m0|@O@_#C~3q~~Tw$%(xsYukNmO_lF+wQjScVmTn0c+J@5 zR96O*Ztk$<%tf(U(o=E_pt+N9Ij1ArbF~m|m<#H4s&r0O7AuSMdN?r;3h$aWW*UB> z$9}e_rh%4uyz&rrO^Y|fIENG+keS__Q}uSQqZ*cBOI)nlUGuy?LGOoz zPAR(?NLScEg7ywu^*uoMksN_>kxsFpNwY4@x!dJr-SUi|nSDtk;k=peLX5B3rQmpg z!=%|aY^9KjYsL^G#aTht^mX-rCYEodX5jmrK_Os&vBf`$^#otM;rTRf{&d)Bpug5aJm)K+WTVmg* zJ;Tfgbq*?Z$>X$loRH4NMm3K>;Ca~`@57|mS!N<8A*zKp^L@1qao5cfJ;k%JuvrsD zo9~0hYP{&)zXQlRPqY|0O|XZGlWB-7^g+G#TZ!T(O{pG@An4XqRq8>?C(42x6(uRM zM&sEK&23kx6b<9_SvSS;et(ZqWM9m7NJN>;X!UCppY8nfvmG{3&_c42nhW?%ncDxn t6w4^yR5_OyPm_+heC^_0l_UuTY=nf4Jy18fnIs?acO+AV2?3hj|M1S!?WX_$ diff --git a/integration/mainnet/debug_getModifiedAccountsByNumber/test_16.tar b/integration/mainnet/debug_getModifiedAccountsByNumber/test_16.tar index c2fc91a77e2827b636a1c7e3b82c2186c7207e38..9b8977771315a1037226ad3812432cd3a17878d9 100644 GIT binary patch literal 32256 zcmeI*$*v~ZRRv&={S>JjTUPW9BS2yZ3}9v%D_XmtIx4e-kyY=`xBjfgv9BdF@c`j; zQ+4y+_#@(+-K@R#+UI_}K79O5JN@$CKYaD&&;CMx8h@r?_*worPvh&KP50?*f76eB z_p^3vyJj4RuJir2pZ)l|<}dU-kB|NK!^iC#kM!}|&p-d{tH-wiq2f>^lGM zIKKM!%a0%a{=fg}+b{O(o8Po!UVLnR_?PbeFJ2$$Bfh!r>znJ#Xqrd-p`}n(me%<hQZ0(Or5XbuF>%(Wh?|#|*v<9B~ zcVGSQR`{ojzTUpszIZ+NkI(hP;{0&KcR#Ms_doLK6@Gf)H_P&AqyPL9f4@Xl>&KhF zZ+_pk+H0_s7@{_qnapPj7Ayn&0=`d>(h-PUAA)jqPjh zt)JF$AMbna*ZZ8izHOVIcKLPHK2P1W_x*m3)4K0X*Dv$f56jUH^L=@Ud0OsfU4LA# z*H!0n?%jDT(|u0!(a&pt?8iN}$Ferdvft~~PVF|%<*Iw$x^eBdxw-H5a=&rw62`V` z8J2New(g#N=<}-EQryEaE@wZtyRVkDZM*dtIyXJNbk{W7+K=TbpJ)Y}^IG<`^*;AJ zEZec|)4JW;a#@Mn$6U{=mT77By`RQ;Kl=VUI$xdbvX0Bp&eymv!*%sneZu{^&+}-u zyItGv-mHFq?Tb~s_iGxubKR!9z3=ULuVbIKZQX`*9M-A5mbSkdFXQ=|<=W1%S%#w< z?y`pKG@jEw9(!{R`!x^ub+-G^+P*nwe8P6<=4;uu=2|Bvc+5w8x`oYdHi>0z$D7d&W#wCDH(%?zwew;f$8On| z_3Y)JrHgf~ht0Cc&wJU&`x@4@>CSy0Z0@p5rw6^RbL<)Z!S2UyKb}{OO}})@I!~u0 z*EZXBHeJ(h$9x^j(ye{hwrAI$W#y-PUiV|@JOAIFdNzr 
zc9{D18n=aeHrH~k=h+U!aJ1t*^?iF@<2{u9I>*J&j(xM`!t0v)#>JCWwQ%fgTZUP@ zsVhI8!_f@Oy>QI=>gRJA%OdzQ^O5$}abJcW%7@=#Cb$ zVIuywv$190`r{a8UNj0L&wGt6N4(E=>)W|&=c5^8ufw*Akk@f;<9Q4+ar?Y#I)>($ z&U0qwKgaIa@4mzhcD+xwh?_Ll#7_>*)a>oljiaR>gsi1shO+WD za=%)uG%fsmIrib6c#TI8;r4lGui?H0mgf_SF!!*`+c3xj*F0YxC$x39FxqY1WL@WJ zJ8PWZ_x;$W&3jq<>Fx%b&89@5p*@%FI_B-#?(=ys0d`vY?%GAdo_k)FeHIVrX5ufl zapdCTR`zQdgs^eiIp@VC+k-!L>nu5Wwu}8PTxeR$YV5neyBCpmVOPg4e+ks_j@A-?9caV`mVbfeY>s0aqsOu?$fo7 zS2M2X!YH^+JD*dD8|@^}dm`(_jLz=5=X;PzT)o6bFK#Dvlz7<3em3?e$g=aMS=!;8 zx~0nv#haDA%y(BjWt`6D8jo?mB*m_mLT2{AoHD}N+rE44I7`ju(|WG$z7F#Lb}wRg zH(JzYm6T?F*2&LHyPV|-xAxeCzU5e?a2MIu*{TVJt~>1aBHtf<@f2$z!W`Su_}E?e zUJ~7p>z@=x}(4wlDkD^>b(G?diFR zGsE7?Dq@E{m!XEc-@K_ueC~4Zlg~zww*rxt*r6%gu84 z&2`*@=(x>G-$*9=dXD>QW#oq5HaEiP^9iSY;$7=4v|PM!8HLe)8@SifO+An4ZVy|r z=$znx&Es(#BGP!C+s!}(#A9d!U}e2cLM(9u!i8+)ars}614(A@w+g^pyYQ7uV(Ns) z=M%D^{OLw;OXwVSo3LMJw~RvqzzSgYma)W*c4Gn8Wl_3zm+mjy)a~0OP+v0#-9+ti z>B}z9ZL_lgCMbt@-Ec{>!_vFFZ*I|Uz88R^uHn}2Q_C`Ds2wa{8fZl_zv9bM;NAA? z)DK6AHqB+hn@Mm}2nZ5OcYDbGK02m3HP`Ki)IpLWK>W!lEeh#w1NI!!Z- zwAsExDtoTs=?%cLV>zzj?BuG}S_6b0t6hZ0AgQsdX!+a@ITkn;3a_#2w+@>3-&Nfr z+_(Li4?0R@JG0R3EuK%?y#uVxu@0M$Upo&Am~QI@i;d0YH(??^{dvJX@aKYm?1Be`y<2$}@LfuFV6GF0+xP*NE@%tzqcGiedrX}h zZ;bOO@3pP!ntomwv5SU7$UJTCG<5?wCyv_>)K-a6GX6LLGO3!6%UyV(9DYoobsUz9 z##yUvcvmHuVu|&SlEd6?jamx!2xze#+bCz2{pwX{p!FFqX??z+eSl-bc`{_-Cpc4S zR$>&_5)TFD3$m0LT7h!_RyuihnL!?QImq?JHhPg3kUSZVoYY(bnkqszvz&5Gw+zgx z+WdLlg3WoI@c1#`c9y9N01TgPf(*$hqQ~=IEF9AygxMWVdG4!y=OoZbe^xPyq8pUi?_f=3NZE82{?7@y5EWiIZe2l!J~p4 zcIn!qV4Y@S?P_9C*z&2>zVVCI|`KVPOFVKxZc=0Wz&&f2O z_u9b8we34VK@eCP_9Z5WY`1k6PUWZ>X;H3flwSQ39AyP00;_$x(>`K*dSpa1_?UYv zIRyhGjH&U4sc8bf;A##7Un(7aTowm`QFd9RL3hFY)-hIpb+{gkmTWmOVGfEPP)j}E zt4UasJ6?7%K`eAZ_VL;_N*GH!7J3 z=OA=WsIm#(P@ALof+x{@w-Nh)sEAPeevdf|jR3mqcU4qTJZyr(IfhuW5RCdF2*18%@Of?M6 zaX{sX{p@B5$j=|FW^VhCj&G`LF;7Gjl(2mrVkj?#*b|VaMzMCbHc`D@vN?Olv8awy zUcxy(88^y?SHZ&hU{ z%kj(}8rW~t!!qo50}!;Zfs*~X@?Z&2r|glu83=Z)6M60ibA$|BryDOLB^C5_Tyx)r 
zKHyHM8zFu{>vti|B-RvXAcBhLc`tO0nhi~O+uZR^)xrLIJgk^4Ee`>TTjd;>S}QyF zV3m{lLOEkap=6i?tr(9{UsY@)9HN9JVtd3Z73VlU%<+o0V~(xLQ8KHU$b+eS+UsOI ztJNFFz^HLu6>|G_dBAaO=AlG3IJJ?&PzT6NOpV79qYwj74#eZW!1=M2H5}LBOoo+7 zlns-WwkFn2Wi8u)ZOj-)S9V#8;$j!qh1mxXkrF5l@wl@KBco!~GE!eWWkDn>K9k!< zB!k4IOq2Q0E-jnaTulOQ3I^EmjeEZxGUK8~$mzr?6STZvTuZnsOF3t~*i1Z;P1t3b#3298x8;O^N1<_>zq z-GD^$)-cLWy8=p`w5TbN{ZBh&ZuWLncAOf@LkH<`dd&nB`f|)Eqc-$-iBZbKRGiF@ zqrmR~QoiJtk`dxrp46VH{S>S|B)H6zYDkP=#XjLW03)$ zJ~^g%!WyZ;8UzA+nb&z7-(+)TDANN)wZB$=99x+$yn8Se)^(>kZV(THY;wc!ofK6aUQ%G7~ zusA58!`kbPSQdy!RZ;RX4iqPk`(i<-%+C2Sjqs^@Q@3jFJ+rn?i5q(=GqeyULsM&Z z%UzXCj3HLz92l8m&vHIj!xCvRfF~$~)+3?+VOUIp+I|wM`aWd+TCj~e@=zJ$b<{QB z`x?=}4X1@TK;sCnp7|;(pUQ^mDBndg6uV@{Ruvo?TS@BbEOb__5CvuEWNK6=BY)9M z2H{huE0xjvP&x?K)Q-=LVidD^%dX3Y2|UvG!Ury#R2!$m4SMF{UwBRIEG9`OF(?mn z-OuV|evdNJHbZeCF3ZY)PgCR{BAlEfW_VgW3Oqk^$wPlt(0r~u>ZngcWRW)xhQgYM zv)46jX0Du&Bc``&VJdhLZc9~pqhL;51l<5+Cq8QJL~g80FZB^!_q{^C%ezs94QstMaOL<+MN+fWmUXTlqzLqJa#+w!-3 z)ZN#BegC$v1!F_ysw^#76&}Q|{iLY{v$hKM9cPEQ^8giN>*qoT+Rvx=?+)2((X_SW z?f}=buTv>E7K?&vM6_G;b`NRJE+Wy36N|)1_n>@ev@jKl@`bYqG2=X^=t3ED?;e}v zKj>vRU=V7bYY2bBt?C#32OXbeKPwnUAGe|!V$1lRH=Rd$K+x#1ci@9&9dic70Y=mp z1yVCyz)j^~^-Y+OFss@fReI~mjH7#5G{q(85Q>3+&nNU_=out2s>)kH!CFS?NC`j~ zE;)Kf(pUET00q$-4NikZG()0x|H}pe^Ht62PZfbI^xh;<*Jx~od?b>Mc zds=?3ul;;Nk%>`Y@+XuC)e=e@FQ}60g>3aE4129_moAir$2V`idA4Aca9u>iHb=bfmpbjvdeHZ{B8I9;p#w@L$@vvQIo`8 z*g2dEQolq|?oRSS>~JafVQ^%oz$+txrdZY^`kvIq&oxYG(vy{R*il(HWIOOB{wM$y z-MIM+3cSi19>5Yl2ue}@i;aSyMzr>TkiEbg3S1$l_F=87E|7X_!vqD>U{j(Zq=?wS zUJ|((yL6Wr1;6#XdxO0&!B3u-?0@;i_m;6O-tL#Ut61nEF#wL6Sk6 zPjt#FaZ-7&G|RV}sz)fL_7dVqsstQ?M3aJJACwr&RH{)%CzSKD4Tgs}5YbvuI)F%o zXdvJP7r^2 z%MrReVBm9?@$0%RU^I!6xX8S$oo7WEzGxNxi52KdoKN{K?+b;A)*8f^h7KsuS&~Gl zX|!QF)xh&!@D(sggP~W6!r9zbWvwzNt3ZHhJA(yu3D2t-xVTHW!4g#|i1PH!6v`M| zYbf?a&$*QSS~f4x0g%uIJ-`g6gu`=PpxZE}E*AC5ei09$M0MIuD@9~!=^$!LCwTfK zZnrAU`%-@iMMs;IAJD7)ENrG^0MF?Ijz|)DD2(-dFQOOCSE@8Dy&q68qt#fj%r}QL z)^&p}YwrfLs5n+&5Io3^V+_NOh5Ctrj&I`urm8+tI^DDAM^ZzcLv$;&M0c20X9!HK 
z+30Rcc0hqBB0Hgy(1QdhZIDE!$PndqzyY_QyYhT5uw1HRYA@v>mImL=@4LId zg7?i$jzA^nvCw0ty)u)f$46*}rw zBS8;j=!^AQEPYTSorZ6r)KQhpH#|0^Plsi{d3eoj_*geIgr<)Jm z=3V*VuH^}FKnT-x?>nlBa71K4V700oj#n@(Aof-AmmL;xdp(OrmqMpeX28~{pI~!% zp7d0T9*Zx7Tly7rQy1cw9aBUCVah*sFO`F&r;|V;5uTJn&#M4~t-~%wbk~B1;`ODhwtc&ns6xL-m`t^5!;eWmVTIsby^07}vau;9 ziB-^ts1CUHc`uOh5K;7OEd%RNaFZIykS#EvJ`bA{)n2(u^-1L|?@0_WC%FQpSKopg znn!MEjc=Cxyb5s_A*SFPhV+8)v=r5Ric9#JE*Ys!6H;t`^wDIdTnHtlqFfF^hq*_9 zqjwZ#r}>tuJ(yAW_yS3eD|-}?8ye#xa=gyLtAL?xFL_yWM|KUqm8b*{u6+s~AOLD! z18MbiI*i4?00V488qwH>VOL8M{td$pxF?Bnn>{1;Y|e4$fD3i2;%`&CYDjFWNCFt;{1!O zCd1agLpYX!2>QiC!R6c{OcEwnCgTs77$UBkVbgOJ(#R);c8tIl;}A$dSE!H_Q4eBz zGlRg!(G2Jaz7nX+o3vkrVi1;vbdsOWP0d6wB=(*M+Z+arsh}-aDJvi--L1JP1g-V5 z@?-+o1|2n+E#y*^_ef*O&flH;BEjo}RZT_y-rlJ3EK7PZdL&Sh{&}dEf!d!nYYskb zUipokP#9sAB`f5Fl|cY%yht{Rm)DwH1$~Jdf_X2+MQcV9jHi^+ChiYZb+@(hBWv-L zkX|#r6??-iM&J~JUA)CqsBN@ytu2M>^9kwM11d=Yp|kK$aRJa!1`?{kXUGC=UhXv$ zRg;%Vq6?Q@b;v+JOeI1_9F)_y!*#W9PCi9dImp|ZWzsWoh)+i32?JhBPee-+#D6|v z#3US3LErUUi9`o^f~xU;v<^D4%HZ%7&#OX`3E7i|NWj@F+<=#8VUZ1keJOABVfTWg z)T|MrQrc>izqmDYGD?;bQ1uGM33)3J`@C0HK7Tal0SxtxVuh>+re(9C(m8?#SFsHw zIb0wa1J;q(cAXY@D|sn=le)xT*X-q9c1wE2_8^mRH0(-aUOgUCLBlNEquGi#Ug^ED z&vpoi@g`|3>r^lRmWw183*XWnPufD4+K&b|I4sOZSQ`#do{&uo zQ9A9aTMw76v7 z9$)`)XJo)gyadzW01{@49U`eQx00`RrC@aE?@CBMgPjT7G)%S`rh8fx7ewT#^e8{1 zM~pt+Qo~D9$v{X7KjcMK5~41^A)yrSU;u%3K|O}{vI1C?v^X{6_`>sE@Qy6N0g(() zSY&sea=~fJvl+_KOrzj0S4CdT`*a(;!6?_dj{ZZuPF-Z*u_d^>;>&3ds7G`i6ox8p zrDQmA>k8`OjC8b2<5cD6HeTX7R059ZH7o3q^+`s?(bUcL=mdgpeB?93?`y@_a&xxIxqk@cPiG zPS8T4utiZv+7a;i40J13;S}Rw`Wr$8r5=CtsD%DH{hm^r7>=l!$Z8!hxIWxO{ZB0> z#TgVcxr84epZAo2Ma7~f4qmf1S|c>9)GBg@r1JBLISwhVNkWbfH5x14q@DQQ~H&Rhw)Sjws zLLwRWpo(}g2}%z9z;g{vQmQU2AytrwNrn|diuPV-Dr2KQ2$Zh=^^!B1qR}-2Pecfq z8)&hw>8&F>x-QjXh(tgPA#b#A7Av8J(9+p-PccWY*$? 
zB5kkePS-b^mlAW{Fcm6mE%M-hEm4scmcsV9ow=~Kk;yUuV2bMbgi^oq?1f&D7V&_T zz>%j&XVxiF8fm9^3MyIDR9%Qj1~XYbPlWZfe4sGPg5ldO|MOnKO!5#%l$JF?Sjy%< z2o|0CnVeXt5|_(GG4I?(#3-;P z=3sCQkq9O*3wG&wFM3ZM-F*?@%oX8l(o2XA47@yi06I_~X{-4Q5ibH@`$$=oTLQLO z51R-NVPXJ!LxOmu=e@M`GI*t=7h_jX6+YR$AP1%>g5{OPRY`Ye;2_!@% zIz_)8S)}2logx=%*d02WZg6)MaWjW!@rp!lOFrqVY!*V0y0d;*d1v$e|ydqWLn7*tAGD8EKAEIt$A5#n& zWRtgSx;6x$wjx+o(E3UthwvRV4)Rh~~%$B=0>I(|}h7z$6Q0fo>^ra%q6)S4)#14L`DhSF={ zBaST)hD9p@R2oAJfB}^U&_>bppVQmdJme`IVG6w*Z3zNceJR%p#nldNq+T@O99r_DxDFUPEQsAGT!61s#<;_Hr*zNI<4z#fsn)W;lkA z0ST4Yas~bzn)lvgc|rv^8V%VKL9Akcc#$o__!1|y9G9clt9l!gQ`#-O-Ud(*;HHQN z#2hTKz)ztq^lKoOC*&x2JEXP#qg#oa?h7vJ<|xUB$h(MSDpv`>psMs=gFtOA*(;(9 zo`D4v0}^60QJ|LEA=8(V3Hk&nM1^SPMzhFlA+Tp4KTIF;yjB6o9;sZX6IpA~h>GPu zs2bKxB@(6E5?%Mxzw~#=8W0(T5T!|4U!y>teMcuE8Sc5pnJ4{(Le711J83y~*<+Bt$S(7f(4Iuu=~Y7kx^zDyxo zPjrD#Gc~C|0$PBx=X>!v+7%*6!+x@CRvIedqm)M&NWsnF4tV3|Rb)rz3=pcM%Nc#w zCFj`#PL^K6&m$x?@hkVm*)2r)Q$-P=cK6=Y7+A!0(4xziex zMJRbHRzzA7Wm05Fl`+G>36;_I;GU!hwya4^+b{FVHh3ROa2GzGP{>GHIYSE3zelMc za*DS4xMu2LWsD3V`SU6TYCo@gXY_}5z#IJ`G9OWiqIXsY2!v=i zhZSzfJSks`1`(T9^-&rT%26+albG0lCcdObm5D9Sa8ZW=5l){^C_)n+sCf~EYF>y$ zhDB0fsVT)@Z7Rh|WniE`p-hn%PLCa@9T-IcWG!s#L$WS)ZCMQuvj8HLhXS|iOyHpZ z5^KmUbae-9!dtCnkWZxSM-r&Ok|U?Qa$-Z@vT)sjT;IHzQr*N za%xh`3lxA;s_4F80LVoMTctTaD<%T7#$1`1Sf|u!g=D?UzN|bTsD`Fcb;^Ny0e+|5 zfMz!}wtXKmcZn>lS0O~xcT&)W{;(E?A5{pzZ=9Q{K$%Q$@ow8?5 zQoY%9Ji&FHu3DQr&r*$xUWBqq^$>{Kawa4+)1oAX+jOK9#LxH2KH|(GbQb!gS<8*w zMA(s82?ja-1V}HLNb>_}LIp>@spYQH!m4B<1csn>w8^s;?YAA~VrrAqd4r3w* zbFuj&jcoWxc&nYP3?XT6>iqL7R%GF*eB58Qgf7!W&lpd^hhgOO+m&fjk)}n?8k1Ke zqCsw9;W8>?hV;1_%E}BX;omrhJHZzw#nNWh%ZZEwJPl{}yeen%g+x>`0lfrEnIRAn z^tL^_15POEG+dtd61~I|Wv(h#8i@TeWC(ze&NP|cqRN2BJg?Fh5~gmbK58Xl+Dp+& z0sc1xs{|B}NuSCtd#H@2>C;ili*XRJ8Dr**ebL0$i{Rdd+n@JR&&Z895k)6kdW~)M z%qcnxTA&z|(ENraHe6|owN%x`j(-~T&2`$ThckGqI>md10(#!d2Vt@44t^rCk zeI_7X7e>28qy%qJ1JBiPJ^}52rchE48LE77QG{&i-RH;|G+UzL^D0zT#0Q8V`a0o& zwo)#NsRtM72XanMj7(oJc1rlMQP(J`^aAkEd|PYI(s(}JgD{Pz?fG6W8OERgzp{oI 
zx;8$MTfUB}!A9X>1NY>93+V1j>blEbCM+;wS%ift6^N>B|8jz(;uk!nJ5c*6ctbA~ zg@=z8vL$>)LuAVey$ltp=@4CI<)I(Z){%KR&{RU_fCNv_Yh)IXhOA2ia@J^rXVa=x zt{U3M;b(%t9DK(1Y`GZ??2jV1*akcZQIG*Cz+mxo!Q?BFIdi|vOTa-0VsM@-&)AGb zduPCg-b=MHNE2RG{6TPSBn&xDs$8Yhpjso5VhJXT41p-g1puiolgLTh;&{~`YB}iU zYfxa2(f~s-GnVjiIWN(PUp6b#0?#L;(p8lkOjVv`CRs91^8gK{3Iq(SBT!%aL@85E zSG*3~Vuz84hT~1Mm@dM*VoP#V+w)$cymDEk;=EnlEoqQSjO!~M2n28PRIk%epHKMe z`~yW$kUE@p)F~1oYL05t%henK_Nb~#6y-#rCOBsZC_PxrjzKu~q|`=~kQB0vH<(TF zk4k@?6r`)4Vgq%6RMJ?*U!^Ij-&{|sNk2+c9(&}N#T>?90W?^XljWl@Zea$tVo?)n z7(PjhQj^v!0Vv=s0*TOoV8rz}$xKW=R|D~7i$^T4uB6N^jCnAvt(*}e$0i_BWYu^` zrryok<8EA1NUK-TKVUo;0$xlkIWVO(ec9&$JUmGqNwJND0qLG&hZGEsed6`-U zeWKLMjHp&}Y+oYN?K2N*#v{W44ni$YNZf!jb<(NtymU^w8xCC1eA5ZAptg~|>naS4 z;g}rbfceNQ3!G_1xLXKB}y||bID07NI&J>n>G8XJ%)m5Zd z&#P=G%}c67g(S#i5p%wsh9zgnJg5vtj9Fr@EkO`P7%kFbRA47v3JomvdgRZ_Fm*yz zep);=hFEk!hf=Y|RECh9`zIMr>RKd$e=FP6E`qYTcO8D5V3oe(4nwP;(wGOCB?RoGn5=o~VdF%2)Y74^O3xzY$3pJ6NB zPFeK4$_tR?=Ah$90^my{j@qOE6fsi}zUnBj{drXyCdRTOdG)!57IL1$qF}R4h)3-k z;RBp_Ugf|JKgV~G!w@7@hz_je&)E{PL}OOXC!SZ?U1~~9ugA$D47d??NDKgBqrNmT z_^Z*I=T$5_v1TN7DVjBY9L%tkD2ff!oy9n6;p>D+_c`_jAW~?M z5wH{GDDBD315kTiDCy_)nlrbL& zJx3r@Wth}A=#qmuos?H204WFySM)18LrT#F<%Df)oe=}9lR;0kXkB?+iRpW|Cu$w6 zAKg=LvC;ETO#&gVh|QFhM-+gQwAd8lG&;nHbP)<}CyNpAXrVD_hvt`@lIWt3s1o9c z#?*y5vROa^-;C#>K!`rGYRrIZt$1Q+SQ|#OwW#IS{E<3`PZhB1UsB=@wK(O3EglxkC|Qdge_C6+D*q75k#8dsH89R1ZeoWhmXAN+&oJMV|Cxi6EHgy(m{{ag0l+ zQjk%HoFgA9TMQEO_4q=L)@n3ouf&&=q{%3q*xb zL-7*ZIk}7EFp?5BEc|2;CZm~@?$4_X$>lf~Z5Sjksxb!?#)(Os90yDW>hN2Agc5C3 zgCQH8QIO+1l^B}zM!Vocxm*>BuM6o{Rz3&?r>^|gd7}7~n4}ZKCzuCFMVUkC<6F-s z6y+IqRHYiC;{#H`4f~+yMsBpKVcxiOIrE6z$ zLO$=M8Lfq-XGu=R(K43ojTtHC6v_V3C+C;Opa{Oi8fRf#h&WUpB$nRJr}T=9{=;b z7%m;A(B!ZMy;of&0s&}1jT17_-pOQmq~}#QS3=L|qA~^s*obh*(QvYs+zh{GL`K$K ze)&EauNH-+@NzWt4wj|6fstOn)q=FRo;ZODQavC69ov#QI8f)6*X@+PR{|TJ&8*<_ z3H1U+G_yox%jsTwb&Kz`3L43DSzMXWRwB0gBr_h4P6N15OH_`!Kk-vC9oql=8Yiq- z@nt5DNy$O|zzU@Y*B1y?0BM4S#I=WVT*-QbNq9!Smq9kXi(l>(6#}+6^X+hlB!fpN 
zPsl;kF^Wf=1cdA+sw1c{qkWc&-K_I0xOD5R)}?zHJyUP(u(`_=W$@ae8%* zLx4co(xre|iq+R3vNFgleh@%@a{|_gh~arJ@D$q3)De9WEjkj(Jh21_38OXO9LSA- zKCcqYyEJbDyEGC32gDi*3X>jr`RhC#)Oo?8Jm2F>8Y;@Dd7PS(V_T!G)&s^kiNRVW zB@;z!Wa|6X3*@>vEm0p`AKQ;wQliVqf~r}_nAqHX-cmNGf zucR84?4R#N*r2kow8CasuUM=89CV#Fq{cC|xujd7jjpJI-FSx+G0bC{NJb?9Ianx3 z6QCl)u;*npSP>p5Z3J|06rgEZqM!vEHkp!&&%UW0a=;e8Bxkf+_n-|Gl&GOsl_s#1 zr0a`!c)nL;tlIw$>Qf7m0BCt}5;5P2KNMU!mzT7*cJ>J!rK+Sx=rKK6-l#dkCRB4E zh(Gq@F1BI3LDLl$!b@b{DQAT_Q4){yqFJ%}kt9?tu|tCpC%U3!sme&{8+rvDbWvnk z1%wbL=9I|B$js~GhE#NTTUDBKE6MZeMG^Ip?)aEDCm28s4iE$!P$J1-WImak1Ty@E z*~Qd|0DIXlX;6hAK7rUdd`KsWSr{+LYiUA|-Yd&$6(H}RpFJezl{JBQ81Niqow})F(%JLnhkgavq>a0Os!q9d$d1 z68zGqRNhb9z`9ELT5Dk!d&iECFw}>2bfr$WFcxirPzYH5lX;3{bz>r+6jPh@1`hA|LYh0z93PN{nbB z$NnTwN%IAZnzd2iK^rL^qfmFG0)vWsPPvRvY(`bW7sJ*DAb0AkvP?rQD z0zgN!49G9G;d{=B(e_qW8~uGhVK!zoa(d$l8x~eFuF9YG2!Y~2On(|I*-8h(4 zZLTE_fN{}0Wm&__@{phc($Z)+UdG^e44DqXGfA4fLDQ%rb5kphf59ILO~^GgFVw0{ p9ayD%=t7JDF#G=bt5$vW-Oqphf8-Yc|MfBdx&nV)f&aHF@ITXE1MvU= literal 12333 zcmV+|Fw)OLT4*^jL0KkKSy08$od60{|Ap_=0DxEp|MGA{R0{w1{%QaL2mlBOU^%|| zec?sM)euD~LqX? 
zQdJ@fAySF%?vPSK+f=q0RV76YGDT4ep;p=;R8op-0+0n&8!{jY3O9sRKg6C*88K3s z5Zamm0MGydpdzQ>i~>@kLqVVb&;SEK0n8==FalvPnqU9{GHIY}t-uwFV%=;V+Z+pw(z@SW3B!a7E!%s}O+YJ@ zq^}uQcX*G$9-~=~zeklvxZ*AfsvW6IT)Vg)#C&Ed2ZJJHh?I;ja6+w*Ld3EVV%tXh z+-sS5R)Bfk<38>1sxy{xwj$zD(5xPZY6c4UinJu{>Ml(Yv9-+sCVF9~jM>x`?Wl3u zM~H!@A~u~_x1Jd~I|T4mvGup5jY4CzNjQaU97gWsEjbz08kZIba-XqB-Iu&z?>mOZ zw2x-> zGzQ7yRQRd{31h^cP0Lq`?@qPHR=HL264?PRo1<#14_Pqz&jJJDP}RTQm@2(qJQUm^ zWpeMkmjiJOmalM~kG6@Bt4z@LX9+pc$~~)qe^Ey>6v3;x9QUQ-@~IjVK|(lY(aqP$ zEc6p4`^hUZ7=}0n9^mL?x_g&?JPhwr?beyxS50kW%z22p;cGH$Hv`P14`~t*Ke++s znjDG>6|&Vn$x;?%Br*Hk4a{E~iN}Smu*YM^`;mr9Ih%8#Bf#eRr_H@(K1| z7LPfTvinP8-uWGxS^RR0+Mde@GYiMS1r zY41H|-sp7=*}QyhUn?Yg1D8gNt&T4%obhv1&yKC-w8x$B&Guj>pUxEIRA8V|+Gc1@ zCes8b%JC7K8L;%+d&Y+o*m`HM;AHS2*eQamzx{Q{G(U|o*c>hsQyhs;rW zlJYbqEO>FT-julN1A1Kb@{f*4m=suZnJykE6p&7vS;M(E7P2;#2Hy?}sQfAu*FJli zy?xf-W4tz6`CjEcloE`TdhNj7E_EI;eEhOZClg>fDg;NHUk#o)c6!xjfzV^Z@T;R8 zN>XYWqW=a35^wbo#ySb7`j!8$Kwax+ zIys}j<6J|iFKL6yRF!Hq++auX3(k2~JJv>ft|Nyn)X&4+T+$GLz_$*&-r_HQ5R*oh z*N*49MrSMAbgX00Uty<(%WuFzKRgw0(9y1Cl3m$w*?6gSAuadM2qoF?x;e+}R$n1F z0W}vjjZ9J44E&laUL$0wb`?>>!U};Nw?Zx|d&2{4%kKyp-$d2o=!4S$QexRVPqmW1 z#H_j)v0;PxVK@jS%z_W@VT5Nb3x}z zbza-umAPl))#bUnahTbk9}CIieiAI8g_%wF8ti)BnxRL1HmJ1t%cH&7>7jVHj3cV} za1SQr?0%}K2$4Nq5Tq&0YvtQ6M~Hz$S&y>zb@^c6xHt}ZJQz=9A! 
zLA*d?*9+ntk|m(z?t*YPBzXh7A<63Gyn7&c&s+g)H|m?xAmG9vwRKBGG%TxVGn>Jf zdaX%tw)E(sJXlu|mF#{e0ZG?w&C+RGCZfH7<{p^bYG5w(Dmu$KL9%nS7ILD0--f#!A+qYskWbr7VIi^#Vo?F%fQFkE%o6TS@;jWcc?*Z7R2)!=e z=Y3-YKBQiU6l1%2Po*5<$P?_LT(`KkgSAPh zYs>TKqmHpvn&(oKGLEp_UmvEz(#iQiLc@-rb>F>7P47wUuNQa3Q47UEVU> zWO9dBNd4Sy?8_Yl6uTfD7`8N}Q+L;Np!d+?Gx?!VG3ds@*wK@Kr9;xUb#@2eHNwmV zfZgU^azR(lrJ!rw&U1$t7aC03v245P(ePXlZF%KSOm*q4yc0ga3uHfQv@erlqM9D(owk8$&iUQ@$Lq zgV8~qIF25X#4!y@#r0GXHt!oD3D@EWvs-TCAuMw9E#VxGcnIRN==UM<#m{)N3K61Fjp z4_t` z!0=AGTzSU^$%$D6Wd@H4(`3w3F9%}7Iy^bv^Excz-EjzI4|_!!%(_qLk*R!RPhIL^ zafIIXMwVFfhbhi60L%j@<>lMYX70O00#uf%ZVv&BFaqpFE!s&+p%uoS8GEQ!c_$0R z+a(qQ?2JB*AoJa-&2V5RRq4NCkRw*#sZPKzG^B3^;ehvR4t~sM@)oM$z*vk%NGu-~ z5388f&QZhALt)!C%)(*BdI=jkjLkdJhS-=Z>pM?vq1oSjhwQ&+CERe1ofqA@KXD_q0&c4w*Y-ZsB2>oJM`>YB5W%ZCGCpMrZt)4=22PwNii-tc z#~KgW!0k@=w0Y#}TR_My)Q>M=`hFmwVaBL_nk!q&V*<(-9STCGhUrSxGf@Y)KM&NX zoA$YG;NvkStr5Urk=zfwBtk8T!^Ue6@l5z7U>&tvzE(hQ;3v;4R0uCIsA1NY$#1Wj?e!e3jf_H{nKA z>vH|)XB2SFJKG4fE<%?phppCo7P<8-PI?b~>S$HVb0Rb6y(3ejn_|;)9JT`5BigM{ z+@2Pkp=32bT)oQg&Nz{Sw*>(S88|0r9HQaR^y1o)B`Y9{Nc^uKkHfA&yyh5=;}8ft}-f$F6g?Wx#5ViIEpaDb9r}2FNwMp_MZ*?440;{AU$ph!^3`danbnoETv9m-A) zfQ)lomKMWV$sa>6knlyh2Xi~Pn5?j~8v+|M-XO%kZrr(G=b_huVoiCz)*juf5X1Vm z(JPh@?kIalDQqqG(bDuca<>hrdNs4z#$B0?6Hcr0a!`SgVuP16M(lOAZ_D5-e4^JX z#KRbl3qG}U9yvbp6?0DYZA7_(&vS7@WU76;HEi!2l4@}Xk-%}DLosOt`XM{kPg!{w z=A(2nV9Kkf-8HvG(Qt9l0_rcEY#Bdifa;y=aJ8hNY^a5 zvjOgr`JcI3LpQuIs`lTnLZ^y&0%f$3^62inq&U>_GeMlj0!={DM&;#jXd)hR51a>q z&BnU+FSeRsITxesoPidxj}%{S!#O@SC7&clqg{iF(@DIqb|B*HI&>H78qFl5@HkB} zktCZY=J-8n6fQ-_v0Ls$Hur2|I|Iaw4yxjKZLO?FM!$G><_9RP& z?JWHc%+w^*tCV)$HxRCDduSNNxhaQ+qu6$~bgVwFgcL)?EDg18UFm)Fv` zp%R5&SsK?5hj29Fwq)j96cN&vSu?Pv~{2F zmkb~UyE{W*;2&F4ou^_p*SpnJd zz+qoINtT$2*YqB2N(B|Z$n8k>VlNG)mon`7C>4}1cjSl}eK<3?D0-)#B9JJvEX^gS z$n-j;>pAdQFTClD>ol@@`pQ!*kVn}RE!*q8V8#eRTKr5V5ozsqv&-ZiWMFcQ!*e0= zT$i-@mHT{yx(oH|h5dL6^XFAt%$9G38Mt}r9sywH>vXqe?7SF0mba#Dp0 znuCUYWj{+t)0MWK-(5?F{+9FW`qirb_vnUa%WtDt+G!=~>~ioel3ih2Zoyf6D4f!a 
zHlgZ`aBeFmG3`#Iwvzlc*XGNL?fMw-dYc~$toKIJ9PWd1Fckwdq{{CJAEw@4EL<+n z9_ila0#7?V@zpWazIg|dy`7oAYrmZI>GO#sFUiGDEg|nB`(F#Si#j={9AQKUuNp4l z%6LGk1YuBTZs#sbI4K7^e)n&7a5BJ%ixpZ!lE2%@`G;tw@)4NqR%st>0NL};GOv4q_P=Uuna3y z_&H^5kL`0}CUk&d?6PywlBQo6P}t$qqAb%9UH0Fvm~j;aei4B?_U~Nzy++fP+q0L@ zG?j=T5@c%#@YwkSyAj^=OD$HFriRIJTTy#)L3dNDW!h?y6(U*C8)s6xU8UzfgJY{3K!2E!}r+Bu$17Luv6 zn{t{S`f8lzaXvBp-kF{7Upv_IhQ2b5Tk1Y8PK1J)_-(g{j+|~8+r1P^$k#Nv(1?Ma za<3tKU}Jof%Vag#PSc|C$u#_!zQQbIJ`HopvVNXCx|Apu7L|pfL$^0<7W0o^F!Qr} z(YOpFNUh>!(-$wZ6^{#9ZxVD0gV`O2st1c#Q-;|$wTPb8@LW;nw>w942!&ye^V#Y< zTZEL9==EoF(9Y&#c`ccv2YoCgFJC*cs0{uRkq#jcBh1$#8iw1g7$dUVTo&zjU_YPFoolZ~{Rpp5G9bn)!zES6N*(;Kp>DIz^4LQUF2uvjKIj-eK_qEy{ z=ak6WC_5{d z6pkyB!es+@QaEQ|)iq49*WqQ)b8_QbB&Y(85^4@(f6T1Yp1z&=d!o0o^h$fWo#~U6y>Et!JZd1T!Ez~&& ztuR{1KVmKHr8~uVCFh_(fr?#&$tuJsP{I#JIpSkz5*^C@PaZu z&g?z<<8=F#1uekl&W5AAV$r9RX$Qus%xHn|ppxGUq1Nq_>O1OJwl+vao5$O{ z-WiH(hDL{~d82)yt0}{7*lKj>QW4|Ke4%C%)fC=)Y?6gM{%rG(+Lct+4xWbRWsAB6 zsmd-wjUenHb)h0GTpi0$pMf$S5T)V|SsYuc;tovLEOLfnO1$VtXQz}ycb&Wz%hsnR zaWT@jday~4xmC>|ql&;%P)K&)q1hmzpM5tKMS90;`5udFCmfu=eIMfzslkLH5n)X91B%+^!qi4DSj%eJfMyBJDdzTy|ZvcCxxz zJ|T<-4!jx+Et9`rzKhI9EWqOj5yqd$n|a=$-K`Iwn9Jts*f&rGv5@QK_*reH{p4LA zJ~44a4l0jZtgwNi=ccD_hd1NmUSN>-IGIj;PUnyXpw;y_QrmmgoDe$&eoj3>;`Zfh z4LzEM&R8W!uOd_~eAB1PQ}n^gmlvirWr8KF723pw+ISsvT-a!|2=UVAyVC{^JN={V z^RMJ2-BmBNVNxsixzMz5a9y_g<3?FAl{V4Hudpg{Ewtw0k5pQG8;Qg@KR|Ce$a6ruQ9e$j@3qJ$&GHEZ1L?&e|Tbtn(|`jBy0KUNfjXLA{6@!Y+-=>G!7b?)uxu zGuAZGABPi1B-yx$=~wCc!CY^7c4Q7HM`zde^ORd5p*tXP-YTt>(N&!HdUaXAu1`hg0T6164d#73 zdI;mHt1EMgnvHgO@iEY2=lU0iPF>;S)H8O&N( za7?o;@+dUSkJg&@d#$y;-s#ZKUpIM0uh&TVn!&WbUnQ{v+ji@s_wlg}+&pEAx-aSU zCQ}(3Z)Fn{5csR+M`a#8a-*P=tZcKA2gt8bM?uge_mf&9xC-8Q#*QbFqmQz<9^8K? 
zlJH;KFVfreP&2w+-yyjwNL-LT?KiWAJ22E-WsUVF9t?UV0jnByXkVdR;hHX-m7OSc zvIu5}CW+~pMw*(aVJQu~o%G)tJs~14!57zma^X;VIpx!;vBc(r72_YIGC3v((K1bl z>B;TU!0VRx7Zky>1uId@hhkewVY7+V=ZT+uQ3>GfIv{TwV0hzW2Y|>Zh$<iC~uquXZLo!9fsrfYKL(I^YNuO?;nMN5%oHyHMK{pmeON=4XE zu!tL~m^wLnYC_G6Li!kl1FGr6)M^UyxblXj*WPPRm=ZJxe63q>~eOZ{TZ@;T!p56<}|^WT+a^2Ws5ER58H6ipt}t*&hsTQO7x^ORLD6rNx0he zqI{kh-CYY~^XDSaFeSU?yA+)pwFizJ~Ia_g-Z9jlB0Q zP1hJKZ5l3SE=u17Oxv|&(Zlli27?}2y=~axk&yaS){0e>0?DeC3Ty^)T8o1c8hVRzoC5=P?iQf#yq~sld-5qO;Ao(cVvcDQ|n|_omO%CY;>rnuohBFV&)7~GGBeJE4)MCQ(XFi) zJAE~VbQu!gR~}Y&SSQB$kK(wlH9jXKfElf7v%ScX%$;So*V+rOntQECrni}U0XN!q z%e&t8p$W@=A$KRnqRyTmY-kkQ8lhOVO}|sAE(IS8rrLp=lhfGC@{=6-b52ESj5$2s zTukk-cAeT2dr?E*70^sE6%i)SV%8tkl*2giPNcJJ%VBOOIvoTp!rp`P3GoTU-(M_? z8y00XonxoQ>mlCR`ya&%K7rO|^sP%QRSp}hG34pExDKHvaA#Pf*~~*}IH+j%bY3~? zmbFUhEsdL4YWvipn6m{$5zl&al8>!gsof2?GD927at9(YG%vC^%N+hbTsYV{p#Ue)>WZ)Jz9X&yn;35+ z6I0=8-0yueoeoj@>II!)h)>0`Z{r)rSo#wP%WK5llo7!CZ#?cW+f1bWo_d zN3V~zgm&G}J94+(J-G8|$iQA>SG_<@+~lVTL!fy*3Co0)GHbNtv+Jx@Q@rvf!>%OW zt~3+df=#c3d$(1H?bwy7eO0P&8wjJx^Q+TE*PUeuwKMNkd0b|==~=Ql0{Qx_#)`0R zQ;4R)uZ@Z2ndPBZ7>SMM{6GjVjTT6zXa^@{@$`QJL zuLrour#7=@PL9ub4{xzdc$2}tH>!HGy^*#x?h^8fndA`ei*=z3JAKAdh0ODJmD%c_ zxf;@tD7w$g1UiwHW=mG_!(o6A&as%ydxrShp+G*SaN0hy-O-;YcT2QjC}wle!ivD` zF>$aARF?s3km?dyX(eWZjxm^|_Y`|(CE3Z&cSgmiRw?VvzPn?Rg6*zWou*N$9`%}? 
z74yMMeayBQ3G>c#(_`0DiaVetu7wptdfjZ4i*oM$$u;$_t;coAAUVmJN)nWup39WU z@R`jjCL+U*1}Q!D#)uv?&o?AshFb0;HH4Kz1Ud++x!v?5$fArQbr)^h5O(^N+@Za| zM5b3+>_;=7KvHVWw^2KJIDvP-gtCusf?<{C>q)||-Hwy$R5svxxC?S1sH;cL^n31% zhEd(Sp22wRapRi*JaK7F2c`fx_&~yB@ps zFq=n_Q*}*WlaorP(wiQTe@Z0c>x zl~=1lVCk0AoQ4FjGok6`>x%X&o3CTmu|FQ&Zt+;^FrWp8mcs!!JB3sNos1cDZqHn> zHJ zMcqWS0xsQ=P}ID23}o(`j){xgC}}-ay`BVzGi;Y1-Gdk4x~^r8`_fZe?Y?zF4qBg} zPeu{foXuU8-T^%IR1Ilr=56y9W1$o}P{SKGD3t6NHQK}N;O+q0TpeX~FNHOK4^I)R zw!m3!0&!S)0#b_|&PFE3VVUZvQ&Q!xERe>}zSHSTU!Zfh2Ql2G5;`qX7Y7BjZ8KBWu`Mn!=6zaT;WF?l zg!C;gUk76_ZKaN`FuR|KR99Ue{YIlHmp*Nk**N8!j|?@V-{00mGZV|-1<(xSC0d>(5Prq%8+I(SC(RIWe<2xQHJ?DmwnqDHGn} zt7-HOu`9T5X9R`l4V!aZ@5!_ju4?3?*E_GtIMp*nP9rvn^pcOFn$4UJUE}#dK_0Hu zrDiuERbGJKhLb`&lD?iIAqXdkc-aR>s-K1ak@2IxQ@EPfjm51REIcZ}50l z9yOxL?@*EH9E#R@iL&bEyW_}Xn^S8OA;3I+;qii*AB911Ou19ubwi<%>m`T5Hhz%b z3w#L%OG*Ri%wWlGk^)MkuwL^Jeb~-FF!osU)@?2gsY?DXRVXnBgJkYS?SqA~awLng z@3|*khC6H{y~X{9!$K5b*drsNmv@0qNO?erjI7K=e-#Auqr zf^f=R*Thz?)dsL-v9KRu{GZWc%bV>Z(;f;SH0*IOLA5b81`Qdd^Q8M9Qswf^6v&x7m>z5GYz`HF6oUhE|Bb*Eyg>}tG*DVC^ zfX>&bx5-)QFAmc@PZ;R@QnRL7ycV@^6D^<`=E(PhWA3PM$A($P9~_ES2Pxh#AC*>5 z0XEjXwJZ}E1r+S2D2RwI%L71jx5xI0vjk@fm~>an!Ez09erwR-2fE^c$F(K{<_SOu z7NhlnF!?CrDMmI`Gu{LM+Z^=MIW~@4ScK@hNxp+763%aj!y|6k2xA1g&vZ%Z=dIF@ zOT399aBd%TvEfLRFJ?P@-;KfIGd6XOyQPhZKJ3Jek3xuI$KA#$yp)O`P;w=3N#Jf- zTG22Gt~syjWeiC4t(uyFJIOxU2$dL;1o4=B1S^ci0q;ms#=Nv-xwOP?jq+bP?elpV z5&W?2<{d7-y!VoK<99MoI-s1&^T)1Wm`%W`3Kc3?rxKd)DUTzD>DKK@u>p4$9iW{J zwyVZDnNeZKN$qiJw&K*VGKymbULZZ9udWy{JTT-kFgWqKaxQ%6);sv{^1Ln&i@CJ( zl>9(CDQh;F@30VLnc=x-nlCgnos&lA8wOxJV0S{>+d?^iGi2a2-=iDdNjuYlBF=ij zpw#8uT{5(VjEg>P;i&Vy_gfFiyWD`bhEZi0dn-pGsxBJ0q?isY+C9Q5xT!Hni|8)e7RZ3H17$(p^+?|$6uQGHhu^8H@VC+gf%MoJ&nemmd} zMBb$KS~Nx${*PPAbsRal!yK!yE?!7iz40dqTC}=BxHpY7-9bG5k(|=@1u`(xo?j&T-cr|l6q4cVlNE*uRS_htKvrJ2?tf7V_po)m2;!Juqe_M-P09-tx@qZ5u!PwoiSj>iM~G|%vw&zXx7g>bFU)wO%h*DR%5~(| z677L2h6S}wi8161*?eN;mEcveT_?IqmkRf7ZC)N!r0#~gD0%07j)m|Ogd;KGj`+Ec zrSY&@213BOF39u)%cpdcR^WToRah34u|W*620in2ZmevcxrUmy=mtb9!<)zP 
z3rq{L(RLK{N;=IJm1qG%)gTfYJ z-1;eC!j!6uypZU7Mk*k?Fr|eMb1&c3HGWk*4oH`eHTwt&2GZCjNWS1lF_$F_8@TY5 zWFDhB=3KAL90~=DBwLTrGd}kBdrg(;?jQ*-;3%UE!jbkmM+P5hy?k*yMqzR#TScB3RZe;aN}dO?=n8!`Fz32ws*f*VqR|!dZlR)N*Zh%hB`>p zo=ryxnL2E`YT(^D*k09na&xVkD39FrSB{-njXz9hfodbqr5d!LNWv(dhmscBGzI#D zh%X$GM_rG1lM?3;$>KumGgg|RSR zqAwNdvKc1FC$POC=nQ8V&!yy2HGpU+YHN!|Syt8ZP$A+e1<0S~H!t_lUf-GgozpwH zb?QgLi?a9|me3!=2D@&L$UJfmh`bxwJ-F+nlo-7+crYS=7Xiaj-;8`yLBmdjOo3>& zK4*{Db2oVm0?3@SM=qkYxGyW=J6%t>ieVyxfCOw@jiGMKifXNzJX>$Qm7Vm&^?UX4neaVMF~14N~p zUz!Xx%oWKf^6gVsf?9;zj-3^x7m?-!ENSO!ugAPig}tfAk9CHDtJZ7J1@YvhI}i`* z=){C-+Rcu=vSs(z4aetV;J#`UdRn$-`%;_^n*{lYB*m$5SnR&Yrb_(zLm81BXmlL)@ex0 zmeKS@!>l0BRa{XUgj)Ie2<2I6$?0lSpvGeN@RXBqTB zr)V13qrjgUfBk`w7sx*!dR%MH1-TRCONPSVZOV6=Qd}kj5Z)| zw2-0Jh-7#F>sD1}WW+fqPUijT`SGW}>DRyd?>~P3-LL+cesz8=)AXzSdtK(wzq{ef z@BPU%FUzm`xgWZDo`%8S_v1Rxzxt;8XL_FIWB>5uPsa})>8C&Z_P4+JzWJ->H-7AW zn?GrPlb7@B%gg!qzmQ*l{TJVS^AG<2n{U4TNho~#Z+`xr-~Zv!^#1GH={P28t zJ^7?x{`h`=yngu2?|=Hu_uu6&e)sbye{=l&(chfUPyg$AzWe62Io&V{N~Ri^z)B= zd4*pd_^V<0GSUC^6aPF!M(fWv|F--4c^rmeTD$AM-gE1={_39bn7jAxyXEcfWq+3O z9e#OpbI|?${dW7fj^}v}+c?c@cMtotuS>U_+j5O}|8&RM?fai!{d3hgZo@om^K&lC z)%U}=Ue~f+SHBHc|Lo`6_2Ss-?}4%*`51+ZTEalOM6v6T=%)%y}>>A zX&ATTxbElb?$?07+rCZncx+P{jj^A*@xIQb>-+uK`r&<^%_p2|Hx27Nbp8Gg<2<+b z8t3WR-($X(=UE>wGW7FyY*&8{-FRKcwU5KSEO)tT>WtdF&)aw$+c~?(*}d;PF3Yn& zTi?xRf9~(wUUi+vFf2k~ncnN@`lB~$#^M~OV;#C>o3G~`p0T~ETc5E%miKY5@i?zz zI`3XNSxzIFyRUtJZ+*S$o(_LLKaas1HuLo^y~yqE+as}ncn*>F+cX52e{AY8HZ!pk7XR+d+EpNez$Jjp6R-FC9sT! 
z$2j(W9gp?BpV>?g+qK+&y!X1(cyxDn&)ZufYPBkssUL*=e2-IqKEu5Z&%Vw3wz|>y zEYIqd+xOb`X}s3+-nwyn4-0zU)?pMPo@!n9$C40fTN%U6bgjFA+%2v5>96S=yZ2a@ z&I`R(+Pbgjag;IKhk1V9bz8P~+WL7F4A-$Amg?oro^D(Bb6THr)i6B$^xWP2p68V5 z=`4cDC*6w)U+(AbMcYrg~I^IFNKg)GmpXX+>?z!uq>+aY7Sg+^4%lcYIuw~x* zcN^B@J%@fV0PD7U)n~Zg;dRA%tbIF%kj~uQR>^x065=!N-O^9vbc)IMGQYc-_ZRI| z{rcR~us>#MKj&pTx6SqEZ7tuYRekUC@fz9oDoIH)+`G$7tQXnzy2Rl+-m@8dpY!&_ z+fyRy?3ZKchwb*#lh~Q=Zku4PWnahZeJ0CbUyie6fb)Hh>)qbvD`{Zpzs`rdq&=?ZxeP|Z^z8lFZ}*`_jHaNI5hualj^Qv})3#c}`#bi%*?#Y9 zOlqIb)#`tqZWaZ7nXj|!2eDxM&-?Jd?%J<@5?Jd3&${lo4b8B&_p%~-!C-mq7L!c1 zE%Vgf<9-h-WW0$EfBlwO&dc61QYX25ZZ)mPFu$W=yUzVtyT{7gYV@9cHykp$VH=0_ zU4}`O6tvrXtji(zZwvgZ`nA1Fmk2%YYnFA@@#HEKw=eys4oQah<(}<(o%1tJk3f>3 z`bV);XIJHU+NNF^+bzEBoaQp}_j&Kvdmo44n$OGZT9zhz^z%OJ-tR*iI!gwyI`(UR zr$zQUB$nNaEyu3BKl`e-oY!cSyAn~8^(1f1>X!Q8@7G~l-eDP@``EGsER|uu%X^tX zROG#2g=2ir_vkFd(dwPfS+x^v+bExx*fX7?v0ea>?`0EL>!#tJ}_9by+%LC3jB~C~!`)-K$C* zWi(_zwRk;@+X(D!*Yj9t`|P;{$Sn&=>& z@YgXd+X02idNnqn%?qv&fn$J&Ko9opP8sn6T=sr@)!Dwjm*`tX8|Ls_>q0gdb}34J zfvUD^I+jh;3?&Y&(@y=DpL)To#9p(k(>?P4HOQsYG~7~eSzoHgJ>U0(-b@g+3MrD! 
zykhqBJ`7^9RL61^BzScvaJ&s?cLBO;g(R@dA37Kx$rIjf->u?i^1eMrVnLoV12wlh1-V1b}$vl?;f08j3 zKV}mg!o^T+_3FXv?aEsO}OTXn6GEkP>EjwolmYugu}&F-e%| zau-P+grDTkQT;bmR%%(5ohY1+S7jWh>s;02VH<`6)Ep5AU~YeLkEP_^1<7y|h3M^$ zTNcE)UhBZ(Rs``^`TkYt+b7&%ISKFCAdz+2_i^0kb$t)u1;L-jMMgf}wT#9o8~*9n zc5mn5^#S-pI_(frD`hbOS6L|+{5t04hZ}w{KbhxwL!bN&Z2t7~)gu_~6T*I}J@2YA zE0-7j!BXsT%$1HnrI18SNQn*EA3J!B7TVlDptbn15PS|;E_us5Qc6J%cme7pBH(0$ zxuR6F*Lz@;r^Akfy_zKFjxoH;3IbZ0L;=J=Hf`T)-K~hLUp)v~ zFJ-Df>el8haXFL|ScId?EN|Uqp2jdlut*U{s?pcs!Tg8^{(RptmNN3UlyXF=nJ5P1 z*rDmtsU&WucgoAriM0DGzC0`l;Wd*_rom`a113=2sNM#%-^cSBF86ES%isw)1O@|z zg^uaDluWoljW`t|nQp?g%g9S((oiTHa-x!f%CelKeTZl24(0<#ssaN$JphMvImrY@ zV|B}|SIZT#2qX~QIn~^2tX8$;SS)4glMnVjGezJ~*vUruv~%jpd*zvN$`|Bc$u^T3 z2z01}DSmIL-vP;WCC`ITod~Bp5CC33J6j@`DGRrJ?{1LqrLhtlenh2P6!nqDk=d5t z-%mJ=l4`Hu2>!K4 zR7V07o*@qBw3X^WQj{hYL<*uY0I{R{s2HFuBzta$3};=paY)d^d-`!UE0XD;E(t-P zSr(8=q#N}D+iR~HgA6b7kjiYDXUkD}Mh%8nLUYf}PpBR3RaWBt?EV)f({+!~B|w4o zGPrDqpI=}|&1_!w14ul{f-|^}svxaFdm@YMH9^3`T$DhGPtlW~*9E5|ZMIyuS{BUl#hDeNqX>Clgb;dn zWA_1=pkK+oojIjpb;`0aKcF_$p16eb;0+2G{QRnD?R)87MK5siW^&Be+4tAhNd=To z{~XWFOjk8^Mxy6K>3PDP_VYRETA0x%(UVDo8@BGNys->4?_rvW1=N6Tg^iuy^edg2uJFbu~@EIOl}jI+Rf>z z>K>#z?zK9c$c?-#fRG5n8dV+J2nM3tNK*u-{>E6^Cydr4%+bUNA(1ZfY{TX zW!I>Zm8(RNS)%6Aqoq!wFeqS9qy(reQ?7?lxgN2ZOu0d%h@NcZ2EC<0?;bZm6l*{g zDb4M@z&zD_0r1omSjtt4?72Xuz=7KGJUu{BunnvqiRB@}EIKPmQvyTT)5jqYoFgTG zLRjkZ3IwfqWH?)gB%h#=Bsx5lK$5`N;bLUxG8#U}E6+rmnLIva-6#(xZG$bN=)yyE z$5M%CHS+Q7jc!?S@J`zvxp_xx)IclZ+NXXhSAFTu{*(VKcpgT@qB z)zTndkzeRB?^rOlDHU%aC0S8Rf+*SLRv-0Q3~dB4d9j@@HCvZ0X&cHzJtLBz@R4?) 
z)|~RHnhoPo#!xea5~K^GYY?zP-Bo5^nj_Y;sYJW((e#%05}?O>m?q+aMM=LS)TCjg z$Ok!+6$$4REvKiT4rP@|-BubD22lZAvpQ*&8JBf^3f}0bC+bIsJd6YsQ*H$aq{s)R zpUg@j^%Cu9NR?T)@ZBKsp=zIuPcy`*lyYse0eD^t`s%UTSsqdrwtl_AcO8o^%~gUK zsAxVyQp;Kv(JPEcF~d?kR+>$eFyu7L;36uJKsB`5CnTIA^oHbCKqzaPFe3o{Qf3v* zVcmSvNH=e<`XvG>k}?PW`pSaavq?PqF>nO|c`Ng!T)m$TELM1lD?j0e5do;3odf_> z5S9v#O7f?-JWA#S2BuZ1f2Fodxmq|Im?!I1ZJAC*l<&xm;sIiHWmZ~hZ-6rVI{H1U)mrF)gUQC>-SAN0~A zx0h@KMG9WcVNGbFBeu9sy;Kt^-CBv_`5dxDdoRsEsd`FT_C#iBl1U>CfHeTt0!T?! zmC>Jv<~2T|2pMM;Y$jNI3YYNIRh|4&-O5@{`#Pn^n3Ju6`%!0j4WIAvRDu!V9C0jp z<00MZV275!7p!JTsILnhKL|;z>3HgBmgwy$AW@Z(P}+yzsmPF#wt-yo&>x0z09Q&J zj`ZQ0U$=lGz?FlQNtUsMF{I8&;w$C7 zCy(N0_ z7|&p4s-ODNcgCYia#7cboilc;ztG7T2cmHDEe z`26rnVwT~)hy`8^3=)P+GY}3!Q0p=#H|W18M2jW^2?<9Q7_jIjafr}su(oSHgqDRp z3EpI%P}qq_aeYu%dKjV?{X4yPEA>iQ^N>eqcBqgIHmI;<#xhreAlOpLY zfU{QgaDvB}ZWnw3-BzL`V;j7D+!*h-6Y#ovQ}R?Mp(=4hO`$Y^9wTCT-cOZNhYGL@mB4E%(Pw&NXYW+OG~g(T4kYZ$7y_+U>X9BV z^Fpj?7(w_38_ojw(Hy8nd%qp3+PT5{p9)Meq}AqE+CMbYc}(*EzPrh)rP zY+oh^ZCI?17kZ5}P9l}G+c*TT4A!bj!pnw0B{rs)%i%&*7r4}w_%LguQbL;JbkFi$=^CUoK7yZSrJiah0+l^J)%V93;a(zXuaYM9oq{;kY?DmG zSNsDCWYdq~ihiu=)?O7c$uf})Wg60+Rv3<^!cQtWBm#*l@mHy^qFd_B;bm4Y*hT?f1E^F-b>p+W1W<&)5+zb9V$he zRt=|D2S&0d4UiYwC#2z931kW>n(k#8_mT!B717Hea+z;Qzem2nSgNcn38kuETSc&Ji+wzo7}d$W83sU+ zv8n~AepVWa*m}wH>OIxfQU_CqX!S;sMlteyOlFamMxf>oXrGWug~UcgCei5Au!dkJ zLVnYb(mW_nuPl*k!01JyOo`!)H=@m6t zi0wvSQUDNkNvC#-iD-Z-9e}G*hYY;qZpb2lR}glAEV3-<_PxM<;J|Av>#Pw-;m6%# zCDionV5)Pu%yD%eF?$7jGyec9sd})bZ1oYe49gA5voee&A9CU%7nTC^7k8Mzm!YY~ zRQH20kk(;pdLH%F;2V>sxMsSd1mA6!l#jn9!k zh?9U>Z5}LEDZN}pD+Qx*4N{D57PL19+Mo>JnAygDwTkvK%i8(Uu5luIft7~}7-t@r zXs5Wyidd&2Q)-IF@@urE8MW9#8w@I;QZWXYUmBRc1nWtu=JGic8nF>5rG9{=bWN-f zUDoj-f89iTOrjmbUsyKQ7V!_d(0x%$G?3Cv3lI?D?Nye9hL5KCrH4af4E2i@R8_|6 z4^rnyP?$@`*2$L8C{u2EmkyH(^Rm*ypcVp zq~c>meYr}@P#rbk0$z7oiV{c$GVX5aj!dolRXh7ksiaX2Yy9=462!Nqn@67Sc{*2x zLYXyZ?Y%%V05tlk(TvPU$`-(krMhn)o0K-oeiu%pOQ6X`wz_B%ba37UV@F%FX!v#+ zcFhKlN_(&PV)U*xpD+j19;eqyr|#{BR8lp#C}w3GhkXH3AyN_biv9#olTiW{!EzF@ 
znv7ru02Us4nSvC3SmI3%mUE?qtTsNk_FhJspr5=3vx4R$Ud^1LT?Fh24`vvZ zZz}gPKC=P`{Bx+9Vt}9MDMzEchnxaRVA_?@`%wX(oDnFXm87M(GGsD!@|-biKNhVz z)jpy81Lx|=MdytTwl@s%*i1rwSBDmvfvDPErAehY%&D+80uaXt41_NxaD1C8hrxP< z+P$Tg-y~3qy2nwF*>&VK7am51F2i7m5!Lq^@nRDV`l@q!BnE7G%{zk@=ciap1z>k+ zpU_ayU6g;y3#EpIGmaY~Dw^a*P#*aJ({Hc3y$Vbc6@^p)ZxEpu?hkOB1e89B6~^J! zUS;uZaYF0=A$wpe>)f!)zY@M!vQ7YEt;cc|+KwaWbt$DhBD?JV@ovw8mkzEDkDj47KJs3s6f4gmcxLl=mWkC{cjJyO2V;e9a#yw)4gj=B58tHqg0l zpHL50sGAj=ZY+FRThPlCDacF?0!xdvs}aRrs>$==D2fQDVQ3oefEUDBVJ@aL-pal3 zp6pUMIMOUyatL6*H1nNWz$G$cw-d*Ks;6xEG_V2uN99t@0+iiJ2Yr;WhE6;pv`gn!}7rJCC} z6v2u@FDk4)%DPOdN2%r1n=FMT>Ty9SpWaNc2y=sG9o;9$naM3QRNm%X<3_H$l6nz6 zj$R|#YCSUZ>CA^?#I$=yX_9b`67vsC87WGo!#b$Tda<#wL-TxkvWmupPFJb0hTJ9v zp9bynh3FZIl*e6&X09N|Vpd|ioi75gECW5IvT8$XifGAe+eyB1k2b>OEvxHe)9@m$ zTg6taS^{qMQVm^z#NQ{}b*0dk_FfW(#tp?O?Lc}cd`YR~1d*u8f&BQ|w3bhM)k1tR zAd9AwZVC|^SK-r#%_VN*qouPLtP+Ph64nX1cPDRV%NNczF>?*tMjV=w4l~JD4if;d z7gY4tcm=|LOd64cl%q>W%$2h!X(h91aMFMbpD{Vjvqf=5OtiSj`!Z0#6*khom);T0 z%MQ{YW~*PeDl-7rpTHXtRC!O#GbM@&M2n^-`(xu!O{9*YMZO)2_@S(JP zF5uhnheTbkZUxx}EJd9O9*}#D-q_wCSCF5DSi~Z;WGykM@8M5$2iaP+tJ?Px-b8Va zNLmNr2>^hYR;jvA+Ol%Af7xJ4?!~Z;p}G*-KB-Hoddy{f0#I#sLN3aG>C`OVgu}2ltYNKw))Kq;Ax}`cT3>CIR1Y9B9UPUh_e9*ffp_AB&6>?ah0XE!dZ_|ERcEOnTDn8Lz zk~T?7&W1~TieRxgcO-h%R*-dc@Y<`m>n@usQ|XZ>dLu`UrK4j5W*f!L_K@u8YOm72 z)(nh)fEz0Ad&~{SjF($TNd1h^(nh&T9rSE|niWbE>pW=Y2c+P$H==g{fkAubq_3eaJdKp`9h6>q@rI2olEak3LD4txDP*_enE3ms<$=S!*$Yt6n3 z+bpdzvxQA43LlS&b|;ndUdt@ofdFLC3B&A}$rNzTC3dq%I*i#dn~?#ub_}gA#S*RO zB#;a2Ft$TlC3AskMuz2Y>#yLAta6#{A)Rj84Y-izjie~~+9Cz92@MFpC}!FO-bI@b ztD$j99U>MeSAlSLV3c8m+EHSjff_~iiH1BfR&-IXv=~fVv(uL3Z0Boh-%HL#DuF^V zu8V@xaEMZ<29)>U;>9BTNF76RJVRO{H%0^c4VA~Po@M0D7SS#Y}{T%537Q& zQvFw5G&XHa^4}Ec7@h6%{mKupnq(3o-61k<KUF5LRmS>a+BduZfFL zvh)GoYwMx;CVWK5qz$?esiYlwYG%YFOF5g(z<2;j{nWoP-3&hdJhi>TgD7E9 zX!Brr9`&)M#VmJR38Wk6EL6|!XXmY-iqc|>gb3H^j16A_(u&Zv0S(a0_6Y;=s1*3F zo$1O-s%3V6!;EaBxJ1y~SNIUM5kzSx=ZE!;(HSz7+CyJq46@%1VtY#*>YR+^Aps)2 
z>=9rQjWoCI!(d6)MHI;tuJ#E(8;#g!Q(Qk=b!hdtln+X`iuqBU(Q+;$uWrc{k*~%G zUOARQ#&9@j`FJVkwSP)4EhgIb3H9>0awrt? zA3?z0I4)^W7FC8^brq~HT&}#AEX%znf5m+qVi&=~p#l69fN<6~(L^OJ_ws{GD`J!e zu9<+|U|t@8NU9ZqDVfA^DR^GHbxi9q7)L_v$-Ls$lDf%mdgJ5>6JPg9vDA1%bM2F( zz%cl*wK>Q`ksn!jvdJDKE7+ztNgB%fH(-KNCD$7{E~BM-y22{*f+dTAekhnPuWGBWeOeJ*{;8>j|roW#f0 zkOmL^P_Du7kSjAh16Q$0mzAQwsI1~Z@B#d)Q?9vYI|Dc_zv+ZQe14|fODizSp%Y9< zccda}jY3FAU0KzLUe0;20&9guCnb)a%&uF=n?kV7b`>udUl%k>QL){nR3g9(e`M^k zi_()xWAXq25cc;kR6ycXP=o!=b+8g*1uB)Ru--UQ=7Wr& z^1wx?E@QhQ&j>)56)J2-Weiam{Xd328!Yso;at_#1-evrf1Z?C)1qh9KB3Qq0;wy+e;{rF+WxisBqN>M3 zNF}rlp$}CrdczfFQ!PtHt7FWB4htK{6zXXpMr|j*QS#A+hCP5+zz`HO&vrr`Ba343KNO z7=MuettoSf#c^FB4NboO%p~(Z+t{&`ws{UF|lrzkss>ezP;HjYCr} z=809=b5E>?;}sc%-XH`WQkD#9Um^+t^71VHf{8St{3)lFJ};taq_LuV+N`A#E&8s) z-cbz3JeyPW!HD6uiRc(X&C;q$UH2eF;377p?GIi;g=}V!hUye2msHMw7hR-%FYM6( zvUX^yXeVvPlFS%sT!SDn~o*3<5j=E8mfNP{16o;X}(9YGQI~ zfBG&M7xhk0Mu%9Zm?^Jt8x+)4Q1Oo7-C5vZLgAe znNn#_cHCvJ*$JXMMj~{V9$*`z_o5;cEXvHH9l|i9iJ&1Qi`mAisKkaxdXRG%5VaoK zd)W;rQ&_I3LUoxk;p26_zIu$gj%$hVybtC@=IZ2A%`t}cH-Lr;WP*> z92d6lg;~g2bpEvAc|mya43;5emIh9 z8ADJ8nWkP-op|qUzEzXqMO7M@M_8Ce9AQu}02F6Wsbi89Z|zsH>_8@LmJ&m9W0v5b z)pIX7${2}RFg8jY5hiCd;k}TTW|fA!c%eU3UK#UmsvL>SnT57SV`+j_*H|s$&fxr) zJnHU6K5sQeF(3_bV6eV=6Z)wgA=xu5#$wW_C#``q_V;z{by9}JnYgeDEqmR1!lJs% zhdz~zt+7OJA_I6bi#NoY01&E2c5`lo=)j_D%&P_bs+%LNEKKV$Kq8wojh~nDa*(eg zTXHYUESZ&8n+!-}&@RM}Cpddlbia=k}t!!D&_DXnJN2wSLdar?M@1=S0(Sq0x@p02Y-b6H%77ae`NJvFP zu_|Wb+>}|ATD3G-OVnj7QI;6g+%a;3KGfj$Ub-h%GNv6*gN|c%S%Ck{cME0T_RzSB zZ3Jtt%HC2S8C#WUqMiE?%q=DnEh96`r%h!k(Hr}5aGTB-He;Wqw_>@sfJ8Iv)=T-8 z#0pAL7eZqkZxm04YGP5@vE-)N7;H{-=$N|76LQF@{hDR~I(3mZkKHSb!NX;npLQlQ zM|IufB7Px{IFw2eD>ZK7=UHhO0?{bS9<8+<4Y&~6H-e{%LJmpK7}l2NEsvZZ#E<*v z%(hoK6i?33TC>N>0v3-z91o{dL9^VM$z^-f*Ioq|(@J$L)jV31Qf|5k)A8KGQEaJp z$uRG>S8+(P)?`({|GM(5EgW`?+_9r!hV(?6AL|Kn&Y1v1?W)qbg^P5gEKX+_QeTYg z7Yz-+EhvTNVs@OX0<391+1chiEDV^nfNck4&Zbds&_1DU<`xz&q@4?J3N=tAV{?F& zr7nITsV$9`_A11iTO>P|>GPtOuwH9eNgfnuq>RZ$+su>r2fBN6{g58_f2A0^Lp_B9ncm 
zIF{osG)GEAR1q}*#nD^SXFu=yf$LVy0Q8} zFqUqb)>o&;aDJ;&WN50P6_KM--9RUrT1FQG(Q}elW!cP_s+zIRK{GNR1BWC)P_(+f za1fpe5<)wMc6n($Q|Hr6rUwvrOdJEEMadu(o~6q&lsJ@vXn4x2m!gy*$ijmm>EnMV zbVyo`<*gkb`+s!Cvu7ksEG-zfu$G|~dn6TC2s-Y}_6a=}om$+^bJHuTu?s8SM!y#Z{`@(+%f<4~O3w3nKKck<^FLz!9#DkR_V8z<}u z5CxWe1Nb`lqa6)vN=R7wpYsPL=$Mo@TbgYLfU@<}kzWpkaup{JXAJ2z$1y7d5tDOH z07j4a*&6~=Sa0oBc5s;);1T&sTOigbdIpf5J%ehwG$i+xm4e4O9gufj24q_Sv~?hv z5A4$sw)*SCOn5U8t-oHru8K( zDc|~vDj6hdiMQ|N=JaYFNAxL56!sci;;{k>lBZM)kI-JFWYLm1Kh-`{W>?&YhvKwm z(Y_I?aYVv0hNN1C?U=%IzJYS7mjItq(LS3_!K+9rW66g)1wgp6q8&}s|7=I(>SMa4 z9PklY-9&_{a1sE5`bTWYksBbr!$?%}m|S2K3YJo-;1rCVLk00kgbl4EnMuRabUaR! znUH5DA^KE#LZXnpn0iBmC}N{wC(iK{iZPvcSP1Y2eadJk!f8z!4Z4v2Uru=`QW1vH2Wuf;%*mYQ>z#S(F} zxil9x4|0o{_Ffcg_pB1_Rkr7lc*q$tEOC*r9Kf1W z76O)WF(+j30GB7E$sI9P}5@B&S^oVI;680TM4|R0P#^{Lh_o9j|xP((KKN|u1b53+f(uF{-ZtHJ3u$Y z^z7%dGvebfqQB?(#&*7b84cYD-2(~)_m}f}43Bjg?J_6Q)QF-^q{l(bQh(JJ9hu@1f_cj&U|vu*MBJInx4DN;vG&b0ez88E6SvEM+t#GzizyPd6WeMOS(3qFxeb zp?so+T}buZ1N~HO1rh)(YVfcr99w;`uf={LHVctYuP#}Xa4l#tW#Wq&N8#+1L~%aH z-X^Ol0LoJtc_-Rak?hrrt;e)F`GhMKexO~eM(l}^errC=#%e0ClfAf16cnW~hmasN zwgc(uP^ui>T6v1WmW)s#XIaYZ3J~2)k`e2HHahS*S*J!6$dKBWAe?bK>A-`*b}*>1 zp{wv`>0lHlC1_cH)x{hK76>n^1*%AlDVJIg97$GtiC#$;7Gf`87}oajp9Fa%7;Fg+ z^Ol73>c-5z1JIaBD~FHSbcz`9zgaGFsDK2gtdq*JosB>p2-j08MIpC@u7T~31Q_BF zZ9B)4U8OFY*(m8=&D#HTedQ|EGTMF=T)uHNcP(sGkXr3r5o`q7b6{BHIZvD;MY7$* zJtc&aH?T5QR7Qlrs8Skp=}L){oNSZ6Ok6QgM>~e<49bj>tM3OYV+9{b0uLKlwnQe*^h^eAY2o1K~L2XZ& zki5zm5{CpI=cZP&{Ks@ag2AHL^DH_Nq*8%HlsJT9UVHXp5JiS`nb_YBI%v+>S9xt{ zhzd|qrQDl$Q7vI{<;Q>J%LqK>r}d~EhP05v+W8vN$M|fPwFX%REHt(V(pP8>s+w#p zoXZ&Im=4`fQ4mkN76r=_?*lAMw8c~z)49*8<`4@vHzZsR{t|;^?*}T7bTg;`0ZXxL zQ0^7;tW`)7&ew3H2g`NFK9+EgpCXPuJ^u1u=88;_Q}ZB7f)S$~bF{}}s7CcEzxW4@ zza0&oP$mMamGFXsdxuZ!M&8uU*7LX3GW@GtWj89>8KY+wdO1>f&1F^ZCF$E*?6b6} zb&=b9k!EwKjYGHeZS83>KfDXH4t0eEQLZFZIS9ACif@=yt|97ZJE}zNUTKKih?t$A z{^j%vu~@K;1d*ah3SjG{criXvt)gA9kPD_b;9{I;?`4Vj8-y2rXNc2y2yU^7)S%da z^ zAVmvIHc;9!fxh?* 
z6T(K50uXtdlJQkthFz2_e?4=v(>O6Uqs^0=;~g{y0B;7mS}Q+ih1)-?3nd_-Af5JX zWm6szbNx=XAKlEdzQ99bw~+Nnj_A-c3`=9=NKb)3FlWMWf0S8v#Ck+A zjgF?5{W}sYY^2>u+YBbtd2p& z05B;p$6DB(%JM|e5Br|V6Q=hH24!!F&HX|AloOf^4{gpG^UHBL+)(YkK8*z|Fx#t1 z*;b3LD~S22<}n|!Wb4_L%*G-Xdo$xjOoW;|I7!4S9MlrmbI7(VU5@r%I0juEdbE{| zm!G*UhY9G4+tO{*9kY|Oo2W4DRkD~i9)|&&avYZMjM4Om7Ah7T*RNJ~RRVQO>SBdtiiIbQ)@wt-I3%+8_qUQF){ zDA@_5hC8_q6(S~iBhs%8_MoO(q^y^#9XTFW`UEJqDzZ&fz>y3&?E z&zpK*cYsPLIq!kq&ZMM44hg_%e2+vqjdwDdzYuBd6Z)1FUKQNk*K64VdMl^djv^~) zK*TZa7zJ(Ma#aqtgf>ymwAl2K%&hLQ{Tr}JB2=;@JeP>FQON=XG!%3F2B)R@3KYr^ zWq^^xz6#1Hs9La%nI##lyJIizR*PT z*)g~tG;SGEcf}jGqoK0l%lQ@ptw$^m>eI!_N%>kzWbJ$pmW^E{AKC-~n3H=YG^sB} z*&rI0ClWj_#%BwTJlx*Pw+5>BTw&R0sk3fGd>sUjzAhC}%(;&AU!&Dtg+tQPgE;KU za$*RNO1|ICkVia-34}*#UG3glzp`X@JVlry@bT!7P*DLxA4Vl>lSWD$D%*Ll z?9$7qS%|uH#11-m)ROEkv`*@2KXQUwcs1hy7-KA|tV?yxGKOrSmNL}J z?#e-E8nTI-p+)`t-s;>N{ zag3sSltGOj2Q}DhMhmlX)VDZP-G%_A6&2YZKtSI(9P*_Vpcn{i293W}TF)emIA&jJ ze0_u%MgT|>3kHI-n*ixk1Ece`?A^h4+IdzaJu8?04nj8(InmdeuE-%U9<(I| zMCW1)bTp3T3pto#b4E?jTCXR`E0U0*m4XH70&vO`uwYi?MrI;gV)zzQspb-=2B$L> z=xa{wK*uPr4wa169pjcmR2V*z_PtcZY}^Qdf;g2AIf@6-p?b~;$}SmV5=fAQ;oAzu#tuaEh!Bk->y@c;D){2yX$ BbV>jK literal 12545 zcmV+cG5*d%T4*^jL0KkKSs0R>bN~u`|Aqh60DxEp|MGA{R0{w1{%Qb$AOHvmU^?F) zaQdTm8Bvj~nAob(ZHQLQ zYPKdZXU~YLe}G0zRH8|yjT!(A0001J3a8)zno_DX&>8>$VFrKz0i;4bPZVSasAMz% z001-qG$N{h-~=&MC;$Ke00000ik@mRG|>h}pnwBF115t&01`wHCXA@rPYIwS)Wkgk zGf2WP6K89Ga(ls@u(B88gn?<%RTvhRXkVrc7J5UbCQ<;0Ta1{1J#0D#-q+{ny?EoJ zcBe@QQ(+{8H0ntS0acD5LPFXS0S2W852Lw`ZxaC>E3RQFS z^o)6efR)0xmg&S^Fxy(w3gUO6eMQ2W-EP+(t#lS<5N}F-rvymXf}XHvd)`y3CSAd% zq;p28qcljZK<$MOJegqheC*l{(vN9-F`Os~nz*N_mjir>lsc@TiiTxx)Hr{q*ki7?7}>C z#ko>-N|H$Ut-~ljDO*IbV5bA>LJ|WRBIV=a)0Ysn4PJZADIp*=dTYjG$*i`_;|V}< z=PBpS7mn6>Pb_mhbV?66(^WKNVVbKq7)pi!*~bXX*y=3FkCk@ezL;1wp{)dT-?C(JfJDkk~A0F~VoSW|O#2VpfiXsO#p2Nb0BLu{u zr}fb=KF8KN9vJF1PU1#k-R#B;4_hl1JL?i_n#vi*)i1HgdX3c<8SD6ZL=6IOEp}2o z4KaJ(4|~haEPRs@gG%H1!GmYI^O@!kK4%XTx&uidDs-D<;B-O8?j)C{#pT1%e5yMt 
z0`IXOwS*deE45&y#1{3Bn&+Vb?csF}=8)xStR#Dw6MReWCkxQ7^1+c7G%K)atU6xV(`lP(-V*XUP$e%2 zImj{{HtwYB_MUbpm>o*I*ynAeAHp^sqpc%`*D!S@*TlaqRkm}sjzy`_uLS1vmOmCl;PYMt+tP2!u=*Jw9f zim=ZF0w&SG9e2?3ee(c;1qDx2r9tWFb1+SqGrj9Ow9+(pfK5f>tF7y}SP<_0l^)Pt6r8$ehZzuA zUkA0)7s2y5amO6PaDyi1*QRXNZ$|9IhjmfFm#mpxz??3eRBP%a@LzN5I{;`yS*9>( zm3r%$<3blML{BNoc`McpTMo0GoXXc&w7Z29YT9vforvP07p)?>M4?Z;aE?mSS7t&I z0>wpq*GBqheb_FdnOk-DJ=9H}!pE*O&+&x%72m=3Z%XlkFQr2(9|gg8^VIZtr(J;z zW2;8EY6V5oq)pLD9?CP`9eu(x7q?)~pMd9$lP~b!f{Ezl7lMmW=t@GuxjCS@sRc9jejjCvD%rVz-KB=Nzg_u6cH#6Gt63jeUL7E2(3Z ziu5$z%fh{L48d~ZS}B=DpOKR_!wcOPn1rUHNR!QYY}yLXdLXR#rPAsUfsW_ted(q2 zI!X8E+LhjdY`4Z44srpBxv`-rG84)9-x1mw#0Go3jRRzK#HO2d7y0^ z>x{ifSWkIKqW2pFA~ys%QQgf$NHlSPa8p|34F`<_)7go?7ExfII$i3IAzOP)3Csxx zyQ8d9vCY8VL|)KXkC2wnIxJeE=ePCgvhQ@T&#fxEfM;dTO*}#Z?Xo=`GnQDK*r7*{ z9GQpS4$5H_0Xt4MrcYPCKVsDCtw`19W3zq~$I$OF+_W1_aGoik3_Xt&EEw=P1Z7#x z!W$Cmd{IXO*)vJcsEXBC{?>&&LkgvA?o(5r#sy@ ze>+{l8^@I>kYV`vlyphpLyD6*sGlQ*t?*~hQHV){(M>X}9Ywy*ScIP8hzf4YPGILT zysR?*`D-y9on=sShSkgG-uo)&gf{k>Wlv&|N)ZFi)Sr^-V+Kwy19a-zTr?*&)&)e+tY7sD5b)G0s7Xd5J z9djhaBxAblqYA`@$)}&7ZIGY<G zu-|vn%GNxDQ#u93KCs{n5c^7l8sHA3nMzunW?9gVbULq{Fh>&y>rSbLZRsfSZshFs zGSDOjQqu(K#Ey2e+ipv%13o=^v5t!f97E)UpD#WG=P3f*f;M?FVzI&O z&oK0cg#l4B9_q}AlPdOHLf`DZRd7D7^=20R)7&)(=7I-;OMT)ZZUflY668qsgfr6# zVIH3J`)}PcD#%#tD`#@VGTR9Kc!d`*(?jUH(mUHe7p%b13&p}ffY5R4MfzCMP&_Dz z0}!YLQjF*#-8*p!Z;_N35^Bc7@a z$>`26xrG zCsLxT0;sy$oJ9aJyyo{HI^fI`HQqg4`j>c;yxN{q_&p^S9G0^;DOP5y5rUOIuc3uJ zPr$q3VjlAnTER%3rFu(rnuyEGz0k+3V}li}!?|@9lmcux$nK#EV039sV~2dwNNs_) zv6i`YB#W`7HbyDl=<{nXde(^)F@REW(SV-4@!m~oT5FS)#E3CrIjNM=EW7UmN^cZ= z4@oylP7Qt`k(2bbt^~%|7lH4#Z-fddU=z-*^bN4#Ekq2i6Nj2!6ur|9X|Nt|We+g7 zaMdjQjXUT@qa#AoUd*2I=mjE_u1_zbtW(UCr=GyuxQ{SY7omy8C2TRrV6&sFOy1rR zd)0OWEOFNQ623#H1BmsU?l}HH7s|*~`y}&E(D|JDlj(ZEkETt0EkTqs%gWGp`VZPl z;m^I7pbJ{V4n1fcx^cEO**^H|Z@sA$-Q4dguU)d05Th%K@c5f~#DT{3W1IS}<4L%V zV>~x>zdkA(%yMgxcHb{Oxf%z_4(s`M#V{e`r|TAgZPLt4p@IdxdUDFk0oNMvE+_zq zd9Q9IWVf4S{E+y0Lj2lY&LZjA@yx_;Zozq?+$fEiDBcTCFA6F$`ibNnc0N?GR+_1@ 
zo4!s><8JxSCtx`wspacJ5@asS?WJB`CHrI10jhb>FY8)GsQ!hTbl(K%fMCaqS!&fPJ~cYWrzRUz$`i*8|J(I#-kc`!U7 zY9PBK!s_>e$uN%VK=bJ=zZeb&N;U^MS;2r^x;x0@av4CsCPRlO&zn+d?0*ZA$&!`* zt&hKJ4LR;2y`C+E+rFI|K+IM5Y0JDUMPO766X7{S_6s;_Hq1)^zUuHp)}s4 z=usjiw6anNu5FJ>m>yk|*bm&w>aoqp3!G+qtI!?mLK(lMj;bASRf_bpOV;qb>m6jY zkxZC;YkA|OV7Qd~^w@yS5vwpq&)^n_&cBL+|_)v3f>RrC`*JPz+?Z!w2 zR38eIl2EbZm@&{28SH^&)7#KC?@jfhWh96gYMJ&r{Z^N_&oUS`}j38SX2@@$2 zL}q(d70R`tZ!KNT774KLc~ax*rhj5{rcIXDViM0z-g0}U7e#rZN#?`pE)4^xTIAiH zgi@M$QlJ+&TZw*Fm-*JIsSG?nbJ(n&N%;%M5^pe_gId?-7q*X0`D^2^K^07^Ep`k?*I0K8x=aztc7-^W;CH?e&}o{?IcdaY zAz<4!cAY~fiEFT=$Pym2t-`JDvSWIBAq3mEM8kuE z(W|!u%RZ&h$s>@5EKFl@Wnf(Rxo+B{bTsv}=Z>i7s(UXX0tO6sCd|tH4(#tE$og&d z+PIhxXR$|0=c}=hYn>~c+B}{?_Xb%ZZ=K9_>At~{&H9E}Ccs#I3)iH?RAJyoF zC!+fW1?w*n17`zgwbWuvG*Fb6(5S9YEIgw*L*}%J^xoni0lU(LCv{x#eUSRCqD0;o zAy3UY)`<=7Kve8OsF)*KE=%cwO1WwYCjuQcYPx8w_e5FAOC={s;L1HQkxT(JYO>qj zan&v!$rk3<9ywolIJZq>8$NipNMhb~@WhJ;V~Arm#X_PbFl9jeWTvKLV46Y)v*?JY zu9A8{rHp6Qapc3mGh7?bqu+td{NRUip@fCzTj2cKQoiSOjiH8Eg~O9H zFE+XHy&2{X2v9dHMA3B2hYqMRXB}=~xsmQlF9(&NuGK|7C8;zqVXsPi0p>Ag>0$Xa zhIXe7(HNu)c`tjhVocJs>Mh8A)puiwjnJaM3>M##!CTOfkKaoqWwFM+gr?x~=y((w z*e1*82;Bgzz9^_7!5INHmxc@;W$Wo2I(^BxI<$t(|#8-%wqe zdy&(&1#6RR%Ns&B$xW*Hj+ssbOx$9)U4oYu%p&+n(Kz%@%Jw^362(bM5;a1R?Ts}j zolg$3tSs<@dr};T;-FZ*7FJ#3>*;~2er@MxNIisZIWooqZCG3(!)xMoBvazH&$^Xe z5@%pKe!3+geMT(I5?%*To)e(O(a72GK%5VAT0~4iW`^>IH9Zdwtu)Q8;!Mit2P(+y zD(r`v1K_i{wI}VkW-PSnkH{Yh9jl>CWI9D+;s*x2d1f(?{mrD?p}Fb%Yp#7ci>SL2 z*xS@PWjtj`nR`$v&z4b}(1KSR;`4Wi2-s20JPRz-4~g!y0dgjKQ*n|N z1-iuwPmkTvW8Ml4$prb-sFS8}<_z)^oaS;d$l~%+;ztQ_Z98OIT4Yr7#GvqU;}&UU zGW39?I6%EJ%nFP;?zG>N1lEtLyvTy=pN}#jpQSb&JYbw|iM?Bx9*=vYdo|xTRvmSH zVH7ubJMR+)*D~N9G$Aj6Sv!0%9_#0#xsfi49NUSYL1JiSswP?7dQ2r-+VE_$IG|b- z2Tzqdp68ajy-Y`r#v7q;fh%*zcFxmjq)s^6R68fYT8Z?HBRv8?1@{{aFmbX9sn8yo z#K#vn{nu1`P6n($l=`KUI|i0ENFysYP9Ex62ZWX~{4lVTxasJ7@QfW*@yML-y@j26 z>W#^xDO;sH+JplVFsQc$o{GXzc+@k@yUE}th0eIoL3rr--V@B1Le86UX7bQvy7Kb% z^=T}YeY9HIi8-e2F%9k?koltbSBprB`?8}Er{jb+R7!XVk}JUg9ml-AL*p>Nh;CPh 
zfaA?0MG{GW6jAUSwy;lqYhEBu4EHE&+svqo^|#EDZiLItx@Ut9ui6~pimA5h50&bi zC)X41RmpfsuNXPfItolllN?T4m$-n5A%WoGDNoeNa7eo~?hr|^@q$eC*@~Y;m6eZ` zo2)dUJYuCf)&xQkvmEmiKSa6Ey*3Jcq5hXc)9L^ z?$LoHEn)Z5Ya#+*%#SL}9;6p-Vv4aces#CL4c1-@%`4r8VX+Ul!{r45#cHHE*uInv z8OY*14Qp#Rm1IK7Wu^hiXDKdK#XWs^O-A&o@r}Wb$I@AdiZB8VZN(i0oVKY#;w)L(s`g*ro?HXQ z6l|zrA*^Ng%4J_piQYhhwo}#=$yFSyaN=JO}~S$ga!y zCZKuOXy-CHL35tIrKYa?D1rxZh&26F0g;p%x>*-JMKS2kRAp{yufi6-4Av-BDkTZg zZ+Ap-oHtF1{ZJa<^Xrzi87M;G8lYQ@-9+6r$;V7a253QLeQ-e!+Ps1C)2TC+yhhxB z&CvHeIUR;ca@$EPhO+Q%9ribqT8*XZe`gR%nWb)Jq@H92&Dh%+>bo-?p@hzn4D+?S zI_J!+EM4fLLdxox!e~AuRb1FtiL~oIIfsZI1Vv)6Qu_r3&Y>lj*=GG!kOo~%8Qe~J z`;vgQAouH8mLBm|dk$^lwE~L~D^3!6IzoFyX8R4)9uQ5M>=a_~G;cEYJh!QQQE?*D zbRNc=P$avYM8S8m7lSxyF-hf8Q5|G%!;3Yp_z#D3Ji%~#JJKYm4YfpXG@c@pKT&9R z9J|7c8e8dljWjBBm$L$ya~4d#dUPu$+OfO@d}O}f&ApTBX2)B02}Ww(l*y-=O^s}B+4VZvv)wpu@jx4O5|AzO zmdlBJe%qxCpOj)jL7b=^;f;iOO@P74qiRIAPnimp1nW{au3g?Ep=@)aoaWru8GrSB#k@wLFb z(LvZTRtBtnhD%Q78BH%cbI6RPs_wKA){CImLN4S|$pc{V!;&d{?^osRw{FeirNV}} zrP+=rt$a3TtIInrtuPy}cyGaxP=x-SULCX!wEdhBVSXqFh&{yuc*M7Q?f2q#f&AYX zK6|-K%k1o~{r1WDeLO=rs?SlhYA_dVlH{iMuY|R{^PTX+7RCa7P7{^r6yVVelSD+= zchr+XiuQ}cRvl`j*ftI=x(w~62ST7U-pi?dlY3+JvkU~U4X2bl;rGh;)GZJK6b9LZ znzLA)@p15$`m#pkf#ELAUy305@r!>A)TG^jBFagovpR6(Ol8(}Jb{l%bY;*D6m0QK zAy(c2qt8CyTxT-4kn=oD%z}f3ncJn2Et2amAZ3@t$pa)}Vba_;Io*}wK;hc&5hbdD z`=S>^ea3S@^>ooLI`;@aCyFEB_BP&3Y9R&;^guP6wz^OXOI%8Zh|WV#91y%$?%*Zv z>z5+$eXQf%UP`}2!ySYN8*mbEx^u{YZ&!kvbB929t)60?iPFi$fohH@A9lDE8#yzf zMQFOHGNrrIXAa?w7lEo@Om2)i)d3sIC=D0nZeL34o#FYVc@Kd{dgN{B@*EyjH#=OW|6LI zK*6O7j^GnvcBPBc9Y_RG+PE_$HLdV-qpDRNOs3vjvWzvGNWKacO_eFnfL=W=X;lP! 
z-0O?UR@FrFd4v?fFf8kS?H5ehMto(lE}N_==+do#FXZ37y(bP55`d1xeu0x2LOSw$J6Ypq@x% zfk1D}yU^=$GZ;)w%p?f$oTo36<(k%%O?NY>InxCI0^Oq4ma~ZG7;C(!n4E%%2MZXRE z1We}Cs5(-IhW>KT4xH%7`8{2)Ji*XUXUP~Et0z~Xu;$(wE42a|lLeVc=O8F#&RSU# z^=Ls!bk2k0=JzbY9%CFey|SG7gR>p)$@js_5(h$N2>ILI_P0I|)LHr$mq|qm3JA*I zAgY`c4?H_v`FLZtK04iBA$gRE$dt0oci3a(rU^@hXmg@c+jTclnK5^0qdk%sLUSwD zs(87U_j(c4uE%as6GM2)$>ZF!Ea|d$Gtn?ki{Nns%BhRK2#Arxdd@26jBG}}_BPg* zyVtW(5gqz0oYq(}#Ur5QFLlXO$4CmD==r9{0d^Hj?_JPBc1Mp8YSuOsEh@&CrjXZ7 zL)p0FH5L~DrJlVgeV(&y+yw*(c0+2k`H19f(xH13DL}&>3-Q!sT7&iaTpErly&!DK z*Rvxqq5Hw+2bho63{kki4Ko1K=-Mm=?wH5GtwyC^^uBg zg9Dy$d#98uLWCu$sRHYQxf6?rfsJBiWFd=+s9{x&vQH7FjlE z2KgLtTdM}3ICU`v$aq@S-%Z7ebgD?=C@%;`@|S0P6M?w(X`bxs+UDEXMule@`OyHQEMI;Hdvz=bj&{DW03n~zO6fKI6f&oh zLfA!fUN4(>I`yAWv4r;a!!mrs(?OK7EAv`jlo$zp6)!_5bn&yUx-H1s=-DjEsGR~a z9NN+AM$lICRRWzY!I27AHhmJ4tlC~V=ua%bd0Y|Yq^U4i3%d7jndu|`U&J4y{bqjC z-@5&`Lq(W45MO9wZ83j}ar1*C_P)2__Afl7$o2u>@CzBGyS*AIE&&7@L8iccMMFyZM$ zT9Fx}q8ti}MF!V{STQU0jaqlY0t&sAg70BnG1NyL8B3j6K&oiS*%-_$BlnPTWDxo@te-I>`nq$o)UM^IC^x}K|*G_ZHCWLMge>OS(NpMyb)cVHpL zkoOaVqeRDuYe!t0N%lVzSb|e%nt`PL4o4HyjmhO6N6BDIEyncOMd81Ktu^xzoW70^P$;0ZHe@LOD}ql)KbVh+oW6gf2B@PgeL0C7p&Mtc4lLYOG^@m5GLbR@A`ph4^OnksRr zW(TPp<|hJ`?dGP9PG{#>As*W5qIjvB-i{2EIxw)mM3cDpjQi7M%z)i$?XyS)xC02W zWG==42FG8GB8t$nXn>lJLBokOV_xWisZsX>bRt8?A`Z z%<4K>URLJLG>Y-dO^EeCZjNml$hva+XBJ3K_YBfpc<}QxDkP@uf?;jf4Ck7BeFfFr zj4Gka7OYNF0t$l>=Pm2H=T~@|2P^KKM!u?%&!LTpeR3XR_UB(~(^4W{b9pKpgXUf? z?SfQ?xjy4QixOlAMTtr|nJ=$nR3TRnQ(R#r!L&*8qc_CXw%d+l=E%_1=!Z5$vmc6*#Ebi6I4p~ywWN+-A3 zTVTjSi|nq=S{6JNpM8iB9$r!S;;U@RCp=jbhckN1 zN?oqMLLzB=W_2!AFr`AC_fvP&pp7yOXfwK$&UPb-!t=OJETT>=IX1w<*2rfkytkQ+ zKXVB2%cjKvTA~8P-XnsS>^KNGT09*6K=+ffY3^RGt80n8`xCx68c?qSsf|)-wokc! 
zm0UX99c1EJ0FsQ3JH|Io}k21;` z&ihh|q~qr3(WOZx9Hd&6G;7Qbo?l&SErnUqz-_cDmPbNl27!1&{hgIeZGA8 z1g|dM;&lmQg^DN#r$1z7>@s;NNfuFe;QQ>uW<+(6dP0Z>K3KD{7FA>+peXuA3DZf& zYsx9J+~HKc!3_9)w z5bpTx7>TRe7s{*6n(>-&qC#$o7W}O_J$R%xEWuPs0rBA`GiOlgycbec)^A!37L&<8 znXkRQR^@t#UTrj~rz#}&n`|j6cU(&77WkRM)3p}e;frUbJ;c7SXl(C;O(H5imaZOr zaYQEIQCR>q_pFqxwooP8RW5{v>dNyf5Og@`99u$G1`~FcJ5U_1bT#s8 zx;(L7j+2mHw`uEyefHFB^zN`d)55%&B?+GBU01tdQ9LW_Udw{?i?W75Lk7N#vP$q= z$E4i7;T+*-q+-XsY-Xz^xr{yJhl|3^an{SciZMQ;?6P?kbtxTS!c4X#IK>Jk#B(R_ zcz1KIw7{!kHMiW~SB$brO>8|r*06C8Oxug~?ZGe4LpLG0JnkAZx$ZO&iaSkug$bNm z9S_gU!$S3-rwT}UuZGp8l{0yAl#KK1-1zf5``atzGrNA__v?!ZvteIj$2CneJhe{{ z8KRaGM=)+1h4;4p)*<3zfI%-i-Vur*qrqW%{DYz80e`yQ`D@|t%P=tVb{-Gvz#_S4 z?xB%~ykB<)y}8@Leh7L6kMsi5yFRXZVCukz4FjdfIuCMGf>a*Uj`wn0KYFGf z{m5Af_261a8QiuxP#L{c-V7$It<5-LY2F1 zI_u`IcoNoUS&W|o7DE`Zysmg%@RjE1vK7z0IszDF>@&_Xh0S%Hg?kwc;c#%+!9<}x zL~=QPD^@vxi&~g)P7Td%q1k;jb)PiTkO{sIDGY$@+{JgI+~i_|r7TLWZ9EMG+Ri07 z$}QPb$Aq|ww<}qNYrRosMWCI#48T~4AFhkx6UhpLdp0N^s0JWD34L9y^)NX?uQgWA z(fnUcEoS80Cs9T#MOh|9q$tuw;CFOVqCo(Auv0tMG4yq`{O~+2!5H;bEP(dPR{(Kf#Y(Nk{zl61n#sUd>$WlAp)%Gtd|!G*R`hFB`hcDD*UTgGGXJpm z6vBXIcf&|`S*E`W;XlrCvy#eK9(7PwG69ew`q!n5VUYQeB1{l`Nf|CdhPk3SdM6uw zfNQ~#mN%Y>HM!wJbf$>T+Nh?}GE*rkRo|ee$c;MF=1l?sgD5%12&L{;rgmtg6SouK z0Y}3!Yzgd}#qfK&N()BzzWhIa=iWfn_iEGMpfcQsl_hHv7juU5KAz2UFk?XgdA?Z5^1v=V07rK z8A`}s@rem&Zsz-W{3PruyR+|+*o`-CSu_MgNH_c3|= zKM#FR;)A&vpeYBV4GC~f@o40x@-)~h?@w_cM>sI(oz71sF_FyTZAyVz zG3#@B{4Ew|a|1&KoEt8sRpERrDulCEA(BHE(`7woV)Cq0o82#YSiL_!q0@PwLUoq` zCQMxHAt|CI?HV3==SyhU9}BUo%LJyvSGsWbcwxZNhTP0~quyq=hVsh_Cl*x+@#=++ zRm;3&@Ce*_w{~lDp2e=|^rHl|DjYQO=vudr?=M|RWF^+oErL^tQ#RLi=SZI-c9@vi z8mj}KA#>UWA2$)09BR)qs5Y!nZlR_P!8t7Sf~H2+-8zNCu*xCg?lX5$PLoLvsrbH2}U)M%0_p9^+CKCgCek?+>UrNJB`MWq+D|MCaq=cB`HY z20%WhxjuFb6VW)hN;{Cr#UApq92(PxuYnYo#>6o$%$I$)KUOb1ugW_WVSY>tT1b|@ z+781P?^~i7l3-=S9q@GIC}oo=uMW$Z&z!$8#fYF0pS~S0^N4(bmU>GN$;NA zHmIg9UG~Lm7S+V#Vxm#w4gDW>A)VK z)8i9@NI}dQZn!5s`K*%|TBxW)Z-s*yvhEP^;PLQ~PJ?|>%u$^(qlQQ7F@`z_5Rvd? 
XgbUw_C;8cdAMtl2Q-uiui7CfG*76VO diff --git a/integration/mainnet/debug_getModifiedAccountsByNumber/test_18.json b/integration/mainnet/debug_getModifiedAccountsByNumber/test_18.json index b3a058f3..804d07c9 100644 --- a/integration/mainnet/debug_getModifiedAccountsByNumber/test_18.json +++ b/integration/mainnet/debug_getModifiedAccountsByNumber/test_18.json @@ -1,258 +1,268 @@ [ - { - "request": { - "id": 1, - "jsonrpc": "2.0", - "method": "debug_getModifiedAccountsByNumber", - "params": [ - 17000000, - 17000001 - ] - }, + { + "test": { + "id": "debug_getModifiedAccountsByNumber_17000000_17000001", + "reference": "", + "description": "modified accounts between block 17000000 and 17000001", + "metadata": { "response": { - "id": 1, - "jsonrpc": "2.0", - "result": [ - "0x86c891d66406ae7ff773dd0c8e8d7566c320fad1", - "0x32400084c286cf3e17e7b677ea9583e60a000324", - "0x4393e0f0ad8993a6ac7e7eb6eb84789f5d4165d1", - "0x704f6d19fad4f4f6e88e60c3c5648f6fe307f141", - "0x7d12e2b0b687b5a13fc3d69fbbdec2c26bf7b1a6", - "0x2b00e955edf3c07b73c1363b1968ccc9c1062eda", - "0x49ba6b2cc56df7c450a4b99739144395a535d660", - "0x95ed53d0a431148481fccb70406df111013adb38", - "0xda60730e1feaa7d8321f62ffb069edd869e57d02", - "0xa0300b4edb87c33c0fcf20d968575595d11c9249", - "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", - "0xe20be2e21640c104cf1bc6e14134dc7c8ddda9b4", - "0xeebc1b0e0f19bd03502ada32cb7a9e217568dceb", - "0x2a42697b8d2e891aa2b31d543a262814228a877c", - "0x503828976d22510aad0201ac7ec88293211d23da", - "0x6120991c423f3566753d3c6c91a5b50d7d2461b4", - "0x7ee0a0c3f3de7dfbcf6cb29c305dd126ef43b1f3", - "0x0d8775f648430679a709e98d2b0cb6250d2887ef", - "0xcaf79308d05e0d5b88d2b916dc8047935361ae1d", - "0x2b0051dbdfdfc78bb4ed6afba4abd35f817bc2a1", - "0x6dfc34609a05bc22319fa4cce1d1e2929548c0d7", - "0x08b067ad41e45babe5bbb52fc2fe7f692f628b06", - "0xabf10d19f028aab53c3c7bc27bcfddb96c845476", - "0xeb2629a2734e272bcc07bda959863f316f4bd4cf", - "0x6ae6ec3311c1ff8f6aca65121284962f7db321c4", - 
"0x56ca43cbe18ecae79e44f50f3ee08aeb73150b07", - "0xae2fc483527b8ef99eb5d9b44875f005ba1fae13", - "0x8a15d48774b795afcac528102b37286431fcf79e", - "0xb846f231b102f98e727d2b9403822025f53a16c9", - "0xfda8595336eeb783516b6a00b81b8d7d800bb09d", - "0x71660c4005ba85c37ccec55d0c4493e66fe775d3", - "0x95a9bd206ae52c4ba8eecfc93d18eacdd41c88cc", - "0xd90825b15f70b64f81bab600f7f56fe3f86747ce", - "0xe52b9c9bafde360287185a6b21df4f87201afc95", - "0x220283a32229e40b5dfa7badd6da8dc0645a0e09", - "0x3610dfa473cee0926e410f7b30cfb9c76b644ef8", - "0x4a555b0506876ddcf9ddc4989a3ad51c86c2690f", - "0x6aa3eaeafbb566b4a9daf706b9e457548f41f219", - "0x050a2e98dc76a74e90713abc104580b06812ade3", - "0xfbb1b73c4f0bda4f67dca266ce6ef42f520fbb98", - "0x9f5c0058c54b96d8c9acba347cfd2a0e8ee26618", - "0x2260fac5e5542a773aa44fbcfedf7c193bc2c599", - "0x2c7d8005dcb8991f961304465bcf4f01fda5c2a9", - "0x53eaba324db362f4987c7ad5aee4a3abdf16ef4c", - "0x9a91e9fb69bed0266969fde441dfc52f0547d30e", - "0x09d74ebe57381bdec31a6ae71f7383e359a5a22e", - "0x9023307587336d8cdc61c70fa3fbb8d29c925c9f", - "0xdaddde57a6eefcad3930167189776e4c706e9336", - "0x1a8a81651582f74b283a6f46bec63394cd01572b", - "0x77cb254ced12775b2013fd079f2c2fab53be29d7", - "0x33908e71abbc5b7819b1ff8249296201496dd2f8", - "0x55ab51bea076e9f0c9549b5a0a806635900b5db0", - "0xa5e9c917b4b821e4e0a5bbefce078ab6540d6b5e", - "0xcf1dc766fc2c62bef0b67a8de666c8e67acf35f6", - "0x054af6eecdb191dc9f2ec1bfe4da48a0a47c58da", - "0xa9d1e08c7793af67e9d92fe308d5697fb81d3e43", - "0xac084df0969bd1e9bb8a2079cf317a30476210f6", - "0xf83b71146ca378c6b23cef675d83fbc399a908a8", - "0x92ee1881763d5dd0979d8dcf2b7874d406954e8c", - "0xb6a2208ec57110bdfb789d8c21cd86491e474a12", - "0xc5ab97ee3048ae3b3430ae8e7ce2c0cc09fe44de", - "0x0e0d37dff9872b4418c915c890126e55ea10bdb0", - "0x514910771af9ca656af840dff83e8264ecf986ca", - "0x77182f4f182432945b520e054d3a7b5ab2a83e22", - "0x8261cb7a33cc31c323630984f8b4eac3bac38ea7", - "0x3c8a9e070c074fda64d7611b4340968edb5803c7", - 
"0x884ba86faa29745b6c40b7098567a393e91335cf", - "0xd06c36055f2aeba6a2acef54c392b60a82625e37", - "0xe4fcd3867b633b9f8f097bbeebd08ba20bc834df", - "0x12a8c19dacb9d3e4c3bd763ea3965db9c68b7b15", - "0x5d94a7740b4d76a488dc6abe8839d033ad296f85", - "0x692671ff2de25c72ea9d6054119a42cf5031e766", - "0xd78301fa1e0aaf836b5a9762162bf900b0ef9b1e", - "0xc758d5718147c8c5bc440098d623cf8d96b95b83", - "0xd68356dcf4c619474901ea27f4f06bdf8cd3a2e7", - "0xfcba0693fc16dcb2a4e8fa7ed3da31f5296993e4", - "0x021818270bf16fc5038de2b20a1b50c05828f48c", - "0x1111111254eeb25477b68fb85ed929f73a960582", - "0x42eb5e1a075d397024099173d3deaa3e7fd380b0", - "0xc6c7565644ea1893ad29182f7b6961aab7edfed0", - "0xf4b4e6fcd0cfb6e3d70821c136b9c5aa0e0936f2", - "0x00005ea00ac477b1030ce78506496e8c2de24bf5", - "0x056607a193892497ec62f7d02e533b49dbf3378f", - "0x5c3d46b478b8dfcbade9322255ca0a44be9c951d", - "0xa01d803e2734c542d13a13772deced63cd6453bf", - "0x76c93a600bc1e01d70000167b32f37bf5a9b908f", - "0x8e6cd950ad6ba651f6dd608dc70e5886b1aa6b24", - "0x8f71858ac4c9ed59444680e158a5ac4c72b56d39", - "0x0e32ed5d0dfbee0fb99536a744319aa2e2d155d9", - "0x86ac86af1fd9a2cb586a19e325be5d68439a6f31", - "0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48", - "0x0000a26b00c1f0df003000390027140000faa719", - "0x77696bb39917c91a0c3908d577d5e322095425ca", - "0xf3702e991422130cbed8bfc6895e51f0189d380d", - "0x78db48873fa142f3bd7bac93c2c84c45b9f5adf8", - "0xf0e42da949aaa6fb754a0e7145b17d33554eeb23", - "0x2bdcb354ed3dff5585b93ec2cd8e7d3a45c364f2", - "0x3071be11f9e92a9eb28f305e1fa033cd102714e7", - "0x3820e79deb5e60f6f488fa2a62c8e190cc69bb47", - "0x5df7511872ba85626cf1ccedbcefc347edc2375e", - "0x6b1a8f210ec6b7b6643cea3583fb0c079f367898", - "0x9c1e863c54a43d2075a42919e6b5beeb81314376", - "0xdac17f958d2ee523a2206206994597c13d831ec7", - "0x850d754a640f640b8d9844518f584ee131a57c9d", - "0x93ebe894b914ce093c0582065d72ff608c2dda3f", - "0xb5d85cbf7cb3ee0d56b3bb207d5fc4b82f43f511", - "0xf34960d9d60be18cc1d5afc1a6f012a723a28811", - 
"0x3be65bd36c1ad28d33acc1c9dd8338a2706ca000", - "0x59d75c99a179aea21ffbed75443aeb3a85451cdd", - "0x73b66a14599ec4741a855fc25da0e1664a3bd44a", - "0xd42f958e1c3e2a10e5d66343c4c9a57726e5b4b6", - "0x5755ab845ddeab27e1cfce00cd629b2e135acc3d", - "0x68701fec3909c62696342b64931d295b9d483e37", - "0xac61b9156f556c5be7d56a199515592c0550d5d1", - "0x0f7b3f5a8fed821c5eb60049538a548db2d479ce", - "0x3cd751e6b0078be393132286c442345e5dc49699", - "0x7830c87c02e56aff27fa8ab1241711331fa86f43", - "0xa0ef786bf476fe0810408caba05e536ac800ff86", - "0xa81710a5b94be40d49f239373b28c997bec0eecc", - "0xb8e86549835d23cdc85668265ba37184593e1302", - "0xc9e48bf45fefead335c8679afe7c5f22e6060997", - "0xd26114cd6ee289accf82350c8d8487fedb8a0c07", - "0x125204f682b19eae1abeb8b813f000e6ff57d1b3", - "0x558ceb41bca5a93d2c35e423cde27354cd284b74", - "0x690b9a9e9aa1c9db991c7721a92d351db4fac990", - "0x6b75d8af000000e20b7a7ddf000ba900b4009a80", - "0xe925e303e6fa2034d641428bccec08fa4cce5d5f", - "0xc3139a1d06811120fc2945b371745376e3388889", - "0x7b93e6eda099146b0656b18cab3ab9f1cbc8dcee", - "0x8967ba97f39334c9e6f8e34b8a3d7556306af568", - "0x9a78b2b57ad66cd3d40b139e04fe6c109150d9be", - "0xab4ce2a08cbea809692218dd7841f681b80069a9", - "0xc964dff79f494056c7a3ebd3130de3fcae8e78d1", - "0x53eb3ea47643e87e8f25dd997a37b3b5260e7336", - "0x9512f0932dce413aec8ac407c2f556ec8b3766d3", - "0xb61ce2b4347bf9a028611f98dbdc8658a47457a2", - "0xc229fcff8882cfc38bafbf16d993a67a89975915", - "0x0000000000a39bb272e79075ade125fd351887ac", - "0xb739d0895772dbb71a89a3754a160269068f0d45", - "0x3d0d7135f00d397ad605d6399f7ed48caa3b1157", - "0x6d2e03b7effeae98bd302a9f836d0d6ab0002766", - "0xf8209a55a4579207610a9ecd080bf3b8899d0e69", - "0x0fe0ed7f146cb12e4b9759aff4fa8d34571802ca", - "0x3cf314571da3e56eaee61af2b30af56f75d3a602", - "0x9821b00cd8d66273e8464c8207792b38c9cb35e3", - "0xd015422879a1308ba557510345e944b912b9ab73", - "0x381b7d64303a2a5251ac12ee147ffdb337da5969", - "0x583346d163a7e0994d64bf76b37240fd27255862", - 
"0xd601c171851c460082ace709f665a9566586f14b", - "0xdefa4e8a7bcba345f687a2f1456f5edd9ce97202", - "0x4eb19b39376445aafd4896f8a752542a6cc6ee7a", - "0xc662c410c0ecf747543f5ba90660f6abebd9c8c4", - "0x000000000000ad05ccc4f10045630fb830b95127", - "0x22cde3b03dc46425c73cdebd020b22c66e072096", - "0x31de4b049179a1c229b21aaf45607fffc86fd678", - "0x4a24c1989e5dff0de40f5804b47623b6b4300b04", - "0x1bd1e416482c472e276d8f96a5f03af2c95db55a", - "0xef6fc863ee706a7a1e7df5dac42b105b2fe717e5", - "0x650c1e71fd009dbd8344bb63a8727b538397b5d3", - "0x04e0774ed6bc70f9cd8035416311ae7434750b1e", - "0x2db1d8cdf1abe8c70b531a790cdf2ff38aecf652", - "0x8ebf5bd59abf1126a0d61016690396652eb63adc", - "0xb82066c91c65c6afc8545ada2b05f1c309e2af38", - "0x36ff4dae0e88113d68b1209e245b0e3af92e9d58", - "0x595063172c85b1e8ac2fe74fcb6b7dc26844cc2d", - "0xc5293c9328e59bbe80c4d92ebbc3253e48d21397", - "0x39fb2ccddff821a231700067592b8db9d8e839cc", - "0x52efd3617f86070c1b1f17ed7961aaf205ad3363", - "0x8c8d7c46219d9205f056f28fee5950ad564d7465", - "0xadf2429393ba26f8db6419d303d585ed1b1ef156", - "0xd068c7c941fbbd2300cb2f1841858c2643722dc7", - "0x06450dee7fd2fb8e39061434babcfc05599a6fb8", - "0x2ba122196510ddb40d573f068308ac028e3e9f89", - "0x52de0bd1fb06696a3fe4c427bdf2489fa5293482", - "0xba411f2b432e28eb40390ea85a22c0b45eb040d7", - "0x91ef7a5f288c7f6e3723651c307174b2375b6065", - "0xd640c898b0902bd02f69de0fe8d0bd560956db76", - "0x386066deb6c543496bddc24db897707369bf3644", - "0x56d80147073584728ec4dd7cd582c7f896a05002", - "0x7bd33162556def9e75b0b5a41b0566e38daafff2", - "0x89d079bbaaf3fc0ceafb94df622104892027c332", - "0xf89d7b9c864f589bbf53a82105107622b35eaa40", - "0x8f7976c3a990acc6dce76d96fdaee6a04cb881b1", - "0xcda3d331eee54e5b2224270e40f24d96abd469d0", - "0xec6294f4ee7600b82627ed8bc6981b6c3b1355fa", - "0xed328e9c1179a30ddc1e7595e036aed8760c22af", - "0x0000000000000000000000000000000000000004", - "0x1689a089aa12d6cbbd88bc2755e4c192f8702000", - "0xa9ac3b077c4b705cf7e0d6eeab60c734c96bcb7f", - 
"0xae0ee0a63a2ce6baeeffe56e7714fb4efe48d419", - "0x09cf4c12514091f4dba650116dc7d0cc907835fc", - "0x0d8e3b855c28a63295737a7f1a175d3cde44bcfa", - "0xc2536728f200674740a8e8b97c17f93e34e59302", - "0x000000000022d473030f116ddee9f6b43ac78ba3", - "0x2bc7dc13db7fb77e373c8e534cc348ce729d9a6d", - "0x6758647a4cd6b4225b922b456be5c05359012032", - "0x7f58558c099ee497abf2019eecf4a99116a44e62", - "0xa109209a2380fd4454b0364c4689f6de18ad18cc", - "0xa4eda560900a8522f360fb03b0605b6c2ab35906", - "0xaa8330fb2b4d5d07abfe7a72262752a8505c6b37", - "0xc10b93574f6966035074a77b314c2a3a565e2423", - "0x111111111117dc0aa78b770fa6a738034120c302", - "0x1e3534e9cf4a9f91cef18694bd8393dc189a5276", - "0x881d40237659c251811cec9c364ef91dc08d300c", - "0x889263c98aa744ed6c4baa995d01f5b62af0404b", - "0xce1f0626083a00eaef1d85dd2979abb5b1c3657c", - "0xdef1c0ded9bec7f1a1670819833240f027b25eff", - "0xfa55aa3ac0b1738b92ec4bc172e9520aae54e135", - "0x25d4e7fe624fcc6e7f0b52d142f40c9acecf039a", - "0xc5a91853dbe604c009a1b80974a8b5e4d8ccc7e4", - "0x2cde4542f6f9d35c694a8480e28482093d866f52", - "0x5d0d47f2eea897c2c56f79ff18ce0e3eb24bdd71", - "0x7a250d5630b4cf539739df2c5dacb4c659f2488d", - "0xc36442b4a4522e871399cd717abdd847ab11fe88", - "0x99d24b435e5a9b07098c10bcb99f6f57c3b5e75a", - "0xc728f2dac6cef1df193f05e69dec73cf1bc1b89c", - "0xdad6a675ff6216c456b467570a7fc4801f495204", - "0xdb0535bd9db1fe5cad41490beb0c7410538e93ee", - "0x0a263bf8367411c7d17c4bdb637850c60cae4e9e", - "0x281dec1fbfe93191b878236fe4e68433585b27af", - "0xe66b31678d6c16e9ebf358268a790b763c133750", - "0xf991e05fb506649cb43caf539f8e3ad91f43ef31", - "0x00000000000001ad428e4906ae43d8f9852d0dd6", - "0x0df407bc6abe9af2093dcb4c974e18d40a6a381a", - "0xb95f8e305c8ca864962c791a09b315fdb89c8fdd", - "0xef1c6e67703c7bd7107eed8303fbe6ec2554bf6b", - "0x12999264f7839df4b7bfc748cc87035032cf04a7", - "0x382ffce2287252f930e1c8dc9328dac5bf282ba1", - "0x495f947276749ce646f68ac8c248420045cb7b5e", - "0x534044106b09d0c15d3248917a23d52646d374bb", - 
"0x69ec82a7682168322316408d772164ba5f8e1fda", - "0x75e89d5979e4f6fba9f97c104c2f0afb3f1dcb88", - "0xbdb7b80e671774989def74c3a1227f95cea83db4", - "0x151b381058f91cf871e7ea1ee83c45326f61e96d", - "0x2427e88ef4ed1e6d0befa29b04638efc1cf5127a", - "0xd1eefc9d2fd96a80b1372cb72807008ac4ff410b", - "0xd5df655087d99b7b720a5bc8711f296180a4f44b", - "0x0000000000000000000000000000000000000001", - "0x5d00d312e171be5342067c09bae883f9bcb2003b" - ] + "pathOptions": [{"@": ["result"], "^": ["SET"]}] } + } + }, + "request": { + "id": 1, + "jsonrpc": "2.0", + "method": "debug_getModifiedAccountsByNumber", + "params": [ + 17000000, + 17000001 + ] + }, + "response": { + "id": 1, + "jsonrpc": "2.0", + "result": [ + "0x86c891d66406ae7ff773dd0c8e8d7566c320fad1", + "0x32400084c286cf3e17e7b677ea9583e60a000324", + "0x4393e0f0ad8993a6ac7e7eb6eb84789f5d4165d1", + "0x704f6d19fad4f4f6e88e60c3c5648f6fe307f141", + "0x7d12e2b0b687b5a13fc3d69fbbdec2c26bf7b1a6", + "0x2b00e955edf3c07b73c1363b1968ccc9c1062eda", + "0x49ba6b2cc56df7c450a4b99739144395a535d660", + "0x95ed53d0a431148481fccb70406df111013adb38", + "0xda60730e1feaa7d8321f62ffb069edd869e57d02", + "0xa0300b4edb87c33c0fcf20d968575595d11c9249", + "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", + "0xe20be2e21640c104cf1bc6e14134dc7c8ddda9b4", + "0xeebc1b0e0f19bd03502ada32cb7a9e217568dceb", + "0x2a42697b8d2e891aa2b31d543a262814228a877c", + "0x503828976d22510aad0201ac7ec88293211d23da", + "0x6120991c423f3566753d3c6c91a5b50d7d2461b4", + "0x7ee0a0c3f3de7dfbcf6cb29c305dd126ef43b1f3", + "0x0d8775f648430679a709e98d2b0cb6250d2887ef", + "0xcaf79308d05e0d5b88d2b916dc8047935361ae1d", + "0x2b0051dbdfdfc78bb4ed6afba4abd35f817bc2a1", + "0x6dfc34609a05bc22319fa4cce1d1e2929548c0d7", + "0x08b067ad41e45babe5bbb52fc2fe7f692f628b06", + "0xabf10d19f028aab53c3c7bc27bcfddb96c845476", + "0xeb2629a2734e272bcc07bda959863f316f4bd4cf", + "0x6ae6ec3311c1ff8f6aca65121284962f7db321c4", + "0x56ca43cbe18ecae79e44f50f3ee08aeb73150b07", + "0xae2fc483527b8ef99eb5d9b44875f005ba1fae13", + 
"0x8a15d48774b795afcac528102b37286431fcf79e", + "0xb846f231b102f98e727d2b9403822025f53a16c9", + "0xfda8595336eeb783516b6a00b81b8d7d800bb09d", + "0x71660c4005ba85c37ccec55d0c4493e66fe775d3", + "0x95a9bd206ae52c4ba8eecfc93d18eacdd41c88cc", + "0xd90825b15f70b64f81bab600f7f56fe3f86747ce", + "0xe52b9c9bafde360287185a6b21df4f87201afc95", + "0x220283a32229e40b5dfa7badd6da8dc0645a0e09", + "0x3610dfa473cee0926e410f7b30cfb9c76b644ef8", + "0x4a555b0506876ddcf9ddc4989a3ad51c86c2690f", + "0x6aa3eaeafbb566b4a9daf706b9e457548f41f219", + "0x050a2e98dc76a74e90713abc104580b06812ade3", + "0xfbb1b73c4f0bda4f67dca266ce6ef42f520fbb98", + "0x9f5c0058c54b96d8c9acba347cfd2a0e8ee26618", + "0x2260fac5e5542a773aa44fbcfedf7c193bc2c599", + "0x2c7d8005dcb8991f961304465bcf4f01fda5c2a9", + "0x53eaba324db362f4987c7ad5aee4a3abdf16ef4c", + "0x9a91e9fb69bed0266969fde441dfc52f0547d30e", + "0x09d74ebe57381bdec31a6ae71f7383e359a5a22e", + "0x9023307587336d8cdc61c70fa3fbb8d29c925c9f", + "0xdaddde57a6eefcad3930167189776e4c706e9336", + "0x1a8a81651582f74b283a6f46bec63394cd01572b", + "0x77cb254ced12775b2013fd079f2c2fab53be29d7", + "0x33908e71abbc5b7819b1ff8249296201496dd2f8", + "0x55ab51bea076e9f0c9549b5a0a806635900b5db0", + "0xa5e9c917b4b821e4e0a5bbefce078ab6540d6b5e", + "0xcf1dc766fc2c62bef0b67a8de666c8e67acf35f6", + "0x054af6eecdb191dc9f2ec1bfe4da48a0a47c58da", + "0xa9d1e08c7793af67e9d92fe308d5697fb81d3e43", + "0xac084df0969bd1e9bb8a2079cf317a30476210f6", + "0xf83b71146ca378c6b23cef675d83fbc399a908a8", + "0x92ee1881763d5dd0979d8dcf2b7874d406954e8c", + "0xb6a2208ec57110bdfb789d8c21cd86491e474a12", + "0xc5ab97ee3048ae3b3430ae8e7ce2c0cc09fe44de", + "0x0e0d37dff9872b4418c915c890126e55ea10bdb0", + "0x514910771af9ca656af840dff83e8264ecf986ca", + "0x77182f4f182432945b520e054d3a7b5ab2a83e22", + "0x8261cb7a33cc31c323630984f8b4eac3bac38ea7", + "0x3c8a9e070c074fda64d7611b4340968edb5803c7", + "0x884ba86faa29745b6c40b7098567a393e91335cf", + "0xd06c36055f2aeba6a2acef54c392b60a82625e37", + 
"0xe4fcd3867b633b9f8f097bbeebd08ba20bc834df", + "0x12a8c19dacb9d3e4c3bd763ea3965db9c68b7b15", + "0x5d94a7740b4d76a488dc6abe8839d033ad296f85", + "0x692671ff2de25c72ea9d6054119a42cf5031e766", + "0xd78301fa1e0aaf836b5a9762162bf900b0ef9b1e", + "0xc758d5718147c8c5bc440098d623cf8d96b95b83", + "0xd68356dcf4c619474901ea27f4f06bdf8cd3a2e7", + "0xfcba0693fc16dcb2a4e8fa7ed3da31f5296993e4", + "0x021818270bf16fc5038de2b20a1b50c05828f48c", + "0x1111111254eeb25477b68fb85ed929f73a960582", + "0x42eb5e1a075d397024099173d3deaa3e7fd380b0", + "0xc6c7565644ea1893ad29182f7b6961aab7edfed0", + "0xf4b4e6fcd0cfb6e3d70821c136b9c5aa0e0936f2", + "0x00005ea00ac477b1030ce78506496e8c2de24bf5", + "0x056607a193892497ec62f7d02e533b49dbf3378f", + "0x5c3d46b478b8dfcbade9322255ca0a44be9c951d", + "0xa01d803e2734c542d13a13772deced63cd6453bf", + "0x76c93a600bc1e01d70000167b32f37bf5a9b908f", + "0x8e6cd950ad6ba651f6dd608dc70e5886b1aa6b24", + "0x8f71858ac4c9ed59444680e158a5ac4c72b56d39", + "0x0e32ed5d0dfbee0fb99536a744319aa2e2d155d9", + "0x86ac86af1fd9a2cb586a19e325be5d68439a6f31", + "0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48", + "0x0000a26b00c1f0df003000390027140000faa719", + "0x77696bb39917c91a0c3908d577d5e322095425ca", + "0xf3702e991422130cbed8bfc6895e51f0189d380d", + "0x78db48873fa142f3bd7bac93c2c84c45b9f5adf8", + "0xf0e42da949aaa6fb754a0e7145b17d33554eeb23", + "0x2bdcb354ed3dff5585b93ec2cd8e7d3a45c364f2", + "0x3071be11f9e92a9eb28f305e1fa033cd102714e7", + "0x3820e79deb5e60f6f488fa2a62c8e190cc69bb47", + "0x5df7511872ba85626cf1ccedbcefc347edc2375e", + "0x6b1a8f210ec6b7b6643cea3583fb0c079f367898", + "0x9c1e863c54a43d2075a42919e6b5beeb81314376", + "0xdac17f958d2ee523a2206206994597c13d831ec7", + "0x850d754a640f640b8d9844518f584ee131a57c9d", + "0x93ebe894b914ce093c0582065d72ff608c2dda3f", + "0xb5d85cbf7cb3ee0d56b3bb207d5fc4b82f43f511", + "0xf34960d9d60be18cc1d5afc1a6f012a723a28811", + "0x3be65bd36c1ad28d33acc1c9dd8338a2706ca000", + "0x59d75c99a179aea21ffbed75443aeb3a85451cdd", + 
"0x73b66a14599ec4741a855fc25da0e1664a3bd44a", + "0xd42f958e1c3e2a10e5d66343c4c9a57726e5b4b6", + "0x5755ab845ddeab27e1cfce00cd629b2e135acc3d", + "0x68701fec3909c62696342b64931d295b9d483e37", + "0xac61b9156f556c5be7d56a199515592c0550d5d1", + "0x0f7b3f5a8fed821c5eb60049538a548db2d479ce", + "0x3cd751e6b0078be393132286c442345e5dc49699", + "0x7830c87c02e56aff27fa8ab1241711331fa86f43", + "0xa0ef786bf476fe0810408caba05e536ac800ff86", + "0xa81710a5b94be40d49f239373b28c997bec0eecc", + "0xb8e86549835d23cdc85668265ba37184593e1302", + "0xc9e48bf45fefead335c8679afe7c5f22e6060997", + "0xd26114cd6ee289accf82350c8d8487fedb8a0c07", + "0x125204f682b19eae1abeb8b813f000e6ff57d1b3", + "0x558ceb41bca5a93d2c35e423cde27354cd284b74", + "0x690b9a9e9aa1c9db991c7721a92d351db4fac990", + "0x6b75d8af000000e20b7a7ddf000ba900b4009a80", + "0xe925e303e6fa2034d641428bccec08fa4cce5d5f", + "0xc3139a1d06811120fc2945b371745376e3388889", + "0x7b93e6eda099146b0656b18cab3ab9f1cbc8dcee", + "0x8967ba97f39334c9e6f8e34b8a3d7556306af568", + "0x9a78b2b57ad66cd3d40b139e04fe6c109150d9be", + "0xab4ce2a08cbea809692218dd7841f681b80069a9", + "0xc964dff79f494056c7a3ebd3130de3fcae8e78d1", + "0x53eb3ea47643e87e8f25dd997a37b3b5260e7336", + "0x9512f0932dce413aec8ac407c2f556ec8b3766d3", + "0xb61ce2b4347bf9a028611f98dbdc8658a47457a2", + "0xc229fcff8882cfc38bafbf16d993a67a89975915", + "0x0000000000a39bb272e79075ade125fd351887ac", + "0xb739d0895772dbb71a89a3754a160269068f0d45", + "0x3d0d7135f00d397ad605d6399f7ed48caa3b1157", + "0x6d2e03b7effeae98bd302a9f836d0d6ab0002766", + "0xf8209a55a4579207610a9ecd080bf3b8899d0e69", + "0x0fe0ed7f146cb12e4b9759aff4fa8d34571802ca", + "0x3cf314571da3e56eaee61af2b30af56f75d3a602", + "0x9821b00cd8d66273e8464c8207792b38c9cb35e3", + "0xd015422879a1308ba557510345e944b912b9ab73", + "0x381b7d64303a2a5251ac12ee147ffdb337da5969", + "0x583346d163a7e0994d64bf76b37240fd27255862", + "0xd601c171851c460082ace709f665a9566586f14b", + "0xdefa4e8a7bcba345f687a2f1456f5edd9ce97202", + 
"0x4eb19b39376445aafd4896f8a752542a6cc6ee7a", + "0xc662c410c0ecf747543f5ba90660f6abebd9c8c4", + "0x000000000000ad05ccc4f10045630fb830b95127", + "0x22cde3b03dc46425c73cdebd020b22c66e072096", + "0x31de4b049179a1c229b21aaf45607fffc86fd678", + "0x4a24c1989e5dff0de40f5804b47623b6b4300b04", + "0x1bd1e416482c472e276d8f96a5f03af2c95db55a", + "0xef6fc863ee706a7a1e7df5dac42b105b2fe717e5", + "0x650c1e71fd009dbd8344bb63a8727b538397b5d3", + "0x04e0774ed6bc70f9cd8035416311ae7434750b1e", + "0x2db1d8cdf1abe8c70b531a790cdf2ff38aecf652", + "0x8ebf5bd59abf1126a0d61016690396652eb63adc", + "0xb82066c91c65c6afc8545ada2b05f1c309e2af38", + "0x36ff4dae0e88113d68b1209e245b0e3af92e9d58", + "0x595063172c85b1e8ac2fe74fcb6b7dc26844cc2d", + "0xc5293c9328e59bbe80c4d92ebbc3253e48d21397", + "0x39fb2ccddff821a231700067592b8db9d8e839cc", + "0x52efd3617f86070c1b1f17ed7961aaf205ad3363", + "0x8c8d7c46219d9205f056f28fee5950ad564d7465", + "0xadf2429393ba26f8db6419d303d585ed1b1ef156", + "0xd068c7c941fbbd2300cb2f1841858c2643722dc7", + "0x06450dee7fd2fb8e39061434babcfc05599a6fb8", + "0x2ba122196510ddb40d573f068308ac028e3e9f89", + "0x52de0bd1fb06696a3fe4c427bdf2489fa5293482", + "0xba411f2b432e28eb40390ea85a22c0b45eb040d7", + "0x91ef7a5f288c7f6e3723651c307174b2375b6065", + "0xd640c898b0902bd02f69de0fe8d0bd560956db76", + "0x386066deb6c543496bddc24db897707369bf3644", + "0x56d80147073584728ec4dd7cd582c7f896a05002", + "0x7bd33162556def9e75b0b5a41b0566e38daafff2", + "0x89d079bbaaf3fc0ceafb94df622104892027c332", + "0xf89d7b9c864f589bbf53a82105107622b35eaa40", + "0x8f7976c3a990acc6dce76d96fdaee6a04cb881b1", + "0xcda3d331eee54e5b2224270e40f24d96abd469d0", + "0xec6294f4ee7600b82627ed8bc6981b6c3b1355fa", + "0xed328e9c1179a30ddc1e7595e036aed8760c22af", + "0x0000000000000000000000000000000000000004", + "0x1689a089aa12d6cbbd88bc2755e4c192f8702000", + "0xa9ac3b077c4b705cf7e0d6eeab60c734c96bcb7f", + "0xae0ee0a63a2ce6baeeffe56e7714fb4efe48d419", + "0x09cf4c12514091f4dba650116dc7d0cc907835fc", + 
"0x0d8e3b855c28a63295737a7f1a175d3cde44bcfa", + "0xc2536728f200674740a8e8b97c17f93e34e59302", + "0x000000000022d473030f116ddee9f6b43ac78ba3", + "0x2bc7dc13db7fb77e373c8e534cc348ce729d9a6d", + "0x6758647a4cd6b4225b922b456be5c05359012032", + "0x7f58558c099ee497abf2019eecf4a99116a44e62", + "0xa109209a2380fd4454b0364c4689f6de18ad18cc", + "0xa4eda560900a8522f360fb03b0605b6c2ab35906", + "0xaa8330fb2b4d5d07abfe7a72262752a8505c6b37", + "0xc10b93574f6966035074a77b314c2a3a565e2423", + "0x111111111117dc0aa78b770fa6a738034120c302", + "0x1e3534e9cf4a9f91cef18694bd8393dc189a5276", + "0x881d40237659c251811cec9c364ef91dc08d300c", + "0x889263c98aa744ed6c4baa995d01f5b62af0404b", + "0xce1f0626083a00eaef1d85dd2979abb5b1c3657c", + "0xdef1c0ded9bec7f1a1670819833240f027b25eff", + "0xfa55aa3ac0b1738b92ec4bc172e9520aae54e135", + "0x25d4e7fe624fcc6e7f0b52d142f40c9acecf039a", + "0xc5a91853dbe604c009a1b80974a8b5e4d8ccc7e4", + "0x2cde4542f6f9d35c694a8480e28482093d866f52", + "0x5d0d47f2eea897c2c56f79ff18ce0e3eb24bdd71", + "0x7a250d5630b4cf539739df2c5dacb4c659f2488d", + "0xc36442b4a4522e871399cd717abdd847ab11fe88", + "0x99d24b435e5a9b07098c10bcb99f6f57c3b5e75a", + "0xc728f2dac6cef1df193f05e69dec73cf1bc1b89c", + "0xdad6a675ff6216c456b467570a7fc4801f495204", + "0xdb0535bd9db1fe5cad41490beb0c7410538e93ee", + "0x0a263bf8367411c7d17c4bdb637850c60cae4e9e", + "0x281dec1fbfe93191b878236fe4e68433585b27af", + "0xe66b31678d6c16e9ebf358268a790b763c133750", + "0xf991e05fb506649cb43caf539f8e3ad91f43ef31", + "0x00000000000001ad428e4906ae43d8f9852d0dd6", + "0x0df407bc6abe9af2093dcb4c974e18d40a6a381a", + "0xb95f8e305c8ca864962c791a09b315fdb89c8fdd", + "0xef1c6e67703c7bd7107eed8303fbe6ec2554bf6b", + "0x12999264f7839df4b7bfc748cc87035032cf04a7", + "0x382ffce2287252f930e1c8dc9328dac5bf282ba1", + "0x495f947276749ce646f68ac8c248420045cb7b5e", + "0x534044106b09d0c15d3248917a23d52646d374bb", + "0x69ec82a7682168322316408d772164ba5f8e1fda", + "0x75e89d5979e4f6fba9f97c104c2f0afb3f1dcb88", + 
"0xbdb7b80e671774989def74c3a1227f95cea83db4", + "0x151b381058f91cf871e7ea1ee83c45326f61e96d", + "0x2427e88ef4ed1e6d0befa29b04638efc1cf5127a", + "0xd1eefc9d2fd96a80b1372cb72807008ac4ff410b", + "0xd5df655087d99b7b720a5bc8711f296180a4f44b", + "0x0000000000000000000000000000000000000001", + "0x5d00d312e171be5342067c09bae883f9bcb2003b" + ] } + } ] \ No newline at end of file diff --git a/integration/mainnet/eth_createAccessList/test_08.json b/integration/mainnet/eth_createAccessList/test_08.json index e3f969d5..eb6794ef 100644 --- a/integration/mainnet/eth_createAccessList/test_08.json +++ b/integration/mainnet/eth_createAccessList/test_08.json @@ -1,48 +1,54 @@ [ - { - "test": { - "reference": "https://etherscan.io/tx/0xdea8cf208ab5a75e25d168115f85ed5e612ad978b9b8ccc462f69feca5a9a6db", - "description": "1 access list entry" - }, - "request": { - "jsonrpc":"2.0", - "method":"eth_createAccessList", - "params":[ - { - "from": "0xdb534BAB6b7d0690f412395B18bD3df078ECfe2d", - "to": "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48", - "gas": "0xC2A5", - "gasPrice": "0x80BEFC0", - "data": "0x095ea7b3000000000000000000000000a87eaf82f287a2c67cb74130906d5ac01f2f925100000000000000000000000000000000000000000000000000000000126af740" - }, - "0xB71B00" - ], - "id":1 - }, + { + "test": { + "id": "eth_createAccessList_at_block_12000000_approve", + "reference": "https://etherscan.io/tx/0xdea8cf208ab5a75e25d168115f85ed5e612ad978b9b8ccc462f69feca5a9a6db", + "description": "one access list entry at block 12000000", + "metadata": { "response": { - "id":1, - "jsonrpc":"2.0", - "result": { - "accessList": [ - { - "address": "0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48", - "storageKeys": [ - "0x7050c9e0f4ca769c69bd3a8ef740bc37934f8e2c036e5a723fd8ee048ed3f8c3", - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x68f621fe39c397cc54aed6af74827e90c78ae48ff5e52728bb90ff2265002db6", - "0xd6d3c01425af47fc283b281db38f2ef429f68e7bd49f0c4700d84377a3e00c66", - 
"0x755c9d80839f4f8436c39a6b618087a1acdb2bfdd0c673d71d0b5563b539aeda", - "0x10d6a54a4754c8869d6886b5f5d7fbfa5b4522237ea5c60d11bc4e7a1ff9390b" - ] - }, - { - "address": "0xb7277a6e95992041568d9391d09d0122023778a2", - "storageKeys": [] - } - ], - "gasUsed": "0xb58b" - } + "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": ["SET"]}] } + } + }, + "request": { + "jsonrpc": "2.0", + "method": "eth_createAccessList", + "params": [ + { + "from": "0xdb534BAB6b7d0690f412395B18bD3df078ECfe2d", + "to": "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48", + "gas": "0xC2A5", + "gasPrice": "0x80BEFC0", + "data": "0x095ea7b3000000000000000000000000a87eaf82f287a2c67cb74130906d5ac01f2f925100000000000000000000000000000000000000000000000000000000126af740" + }, + "0xB71B00" + ], + "id": 1 + }, + "response": { + "id": 1, + "jsonrpc": "2.0", + "result": { + "accessList": [ + { + "address": "0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48", + "storageKeys": [ + "0x7050c9e0f4ca769c69bd3a8ef740bc37934f8e2c036e5a723fd8ee048ed3f8c3", + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x68f621fe39c397cc54aed6af74827e90c78ae48ff5e52728bb90ff2265002db6", + "0xd6d3c01425af47fc283b281db38f2ef429f68e7bd49f0c4700d84377a3e00c66", + "0x755c9d80839f4f8436c39a6b618087a1acdb2bfdd0c673d71d0b5563b539aeda", + "0x10d6a54a4754c8869d6886b5f5d7fbfa5b4522237ea5c60d11bc4e7a1ff9390b" + ] + }, + { + "address": "0xb7277a6e95992041568d9391d09d0122023778a2", + "storageKeys": [] + } + ], + "gasUsed": "0xb58b" + } } + } ] diff --git a/integration/mainnet/eth_createAccessList/test_09.json b/integration/mainnet/eth_createAccessList/test_09.json index 7911350a..6375ebf0 100644 --- a/integration/mainnet/eth_createAccessList/test_09.json +++ b/integration/mainnet/eth_createAccessList/test_09.json @@ -1,53 +1,59 @@ [ - { - "test": { - "reference": "https://etherscan.io/tx/0xdea8cf208ab5a75e25d168115f85ed5e612ad978b9b8ccc462f69feca5a9a6db", - "description": "no access list entry" - }, - 
"request": { - "jsonrpc":"2.0", - "method":"eth_createAccessList", - "params":[ - { - "from": "0x8Fe9C787995D12b6EF3a9448aA944593DaC93C6c", - "to": "0x892555E75350E11f2058d086C72b9C94C9493d72", - "gas": "0x668E2", - "gasPrice": "0x1FE5D61A00", - "data": "0xe7fc646500000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000026c0000000000000000000000000000000000000000000000000000000000000244000000000000000000000000000000000000000000000000000000000000021500000000000000000000000000000000000000000000000000000000000001b5000000000000000000000000000000000000000000000000000000000000025800000000000000000000000000000000000000000000000000000000000000a700000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000006b0000000000000000000000000000000000000000000000000000000000000085000000000000000000000000000000000000000000000000000000000000009e0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000019900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000123000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000001690000000000000000000000000000000000000000000000000000000000000152000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000f400000000000000000000000000000000000000000000000000000000000000d400000000000000000000000000000000000000000000000000000000000000d80000000
0000000000000000000000000000000000000000000000000000000d9" - }, - "0xB71B00" - ], - "id":1 - }, + { + "test": { + "id": "eth_createAccessList_at_block_12000000_revert", + "reference": "https://etherscan.io/tx/0xdea8cf208ab5a75e25d168115f85ed5e612ad978b9b8ccc462f69feca5a9a6db", + "description": "no access list entry at block 12000000", + "metadata": { "response": { - "id":1, - "jsonrpc":"2.0", - "result": { - "accessList": [ - { - "address": "0x892555e75350e11f2058d086c72b9c94c9493d72", - "storageKeys": [ - "0x350993b65eed3eaf3a908f86322f141923458e4d3a8972f5c97bb672b3e8a8d4", - "0xc89ccd72b2ea792f2c0dc9ba6996788435e505ab2cd1301b2b5d0160803b08e1", - "0x000000000000000000000000000000000000000000000000000000000000000e", - "0x000000000000000000000000000000000000000000000000000000000000000a", - "0xa7b4ccdf81bc32d1e2176af73ef4736186f278bed4fab7b9d6ab189c3903fc37", - "0xe464df08b1a6bc4a7efd0b57900b5ac59bc86655560732a1d4391a671c7e1d1a", - "0xbfbe8310fe66eba55e1ee378a9036bf3e4896a07d911027a46be0497ba92bb28", - "0x3342f3943919b6eca1851ee5f59c35336e7e5e165bc32724ba642e82a03ac18d", - "0x6fd6796dd663538e711579bcb56d95f71a67ae65ad5431ec94f9719699da063c", - "0x9843988e4fcb784b0f01aa741314ab445dfb55418f4b7d9897fc7ab2d2e018b2", - "0x000000000000000000000000000000000000000000000000000000000000000f", - "0x4ce2dac29b7f62ea47a3d2578f10d0788b17a133cfdc1e280432bad7529ad4d7", - "0xe1120b0b1e1850317111b310cc89571b7ccf1195feb3aaf638ee0f03f5d97c7b", - "0x000000000000000000000000000000000000000000000000000000000000000d" - ] - } - ], - "error": "execution reverted", - "gasUsed": "0x122cf" - } + "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": ["SET"]}] } + } + }, + "request": { + "jsonrpc": "2.0", + "method": "eth_createAccessList", + "params": [ + { + "from": "0x8Fe9C787995D12b6EF3a9448aA944593DaC93C6c", + "to": "0x892555E75350E11f2058d086C72b9C94C9493d72", + "gas": "0x668E2", + "gasPrice": "0x1FE5D61A00", + "data": 
"0xe7fc646500000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000026c0000000000000000000000000000000000000000000000000000000000000244000000000000000000000000000000000000000000000000000000000000021500000000000000000000000000000000000000000000000000000000000001b5000000000000000000000000000000000000000000000000000000000000025800000000000000000000000000000000000000000000000000000000000000a700000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000006b0000000000000000000000000000000000000000000000000000000000000085000000000000000000000000000000000000000000000000000000000000009e0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000019900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000123000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000001690000000000000000000000000000000000000000000000000000000000000152000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000f400000000000000000000000000000000000000000000000000000000000000d400000000000000000000000000000000000000000000000000000000000000d800000000000000000000000000000000000000000000000000000000000000d9" + }, + "0xB71B00" + ], + "id": 1 + }, + "response": { + "id": 1, + "jsonrpc": "2.0", + "result": { + "accessList": [ + { + "address": "0x892555e75350e11f2058d086c72b9c94c9493d72", + 
"storageKeys": [ + "0x350993b65eed3eaf3a908f86322f141923458e4d3a8972f5c97bb672b3e8a8d4", + "0xc89ccd72b2ea792f2c0dc9ba6996788435e505ab2cd1301b2b5d0160803b08e1", + "0x000000000000000000000000000000000000000000000000000000000000000e", + "0x000000000000000000000000000000000000000000000000000000000000000a", + "0xa7b4ccdf81bc32d1e2176af73ef4736186f278bed4fab7b9d6ab189c3903fc37", + "0xe464df08b1a6bc4a7efd0b57900b5ac59bc86655560732a1d4391a671c7e1d1a", + "0xbfbe8310fe66eba55e1ee378a9036bf3e4896a07d911027a46be0497ba92bb28", + "0x3342f3943919b6eca1851ee5f59c35336e7e5e165bc32724ba642e82a03ac18d", + "0x6fd6796dd663538e711579bcb56d95f71a67ae65ad5431ec94f9719699da063c", + "0x9843988e4fcb784b0f01aa741314ab445dfb55418f4b7d9897fc7ab2d2e018b2", + "0x000000000000000000000000000000000000000000000000000000000000000f", + "0x4ce2dac29b7f62ea47a3d2578f10d0788b17a133cfdc1e280432bad7529ad4d7", + "0xe1120b0b1e1850317111b310cc89571b7ccf1195feb3aaf638ee0f03f5d97c7b", + "0x000000000000000000000000000000000000000000000000000000000000000d" + ] + } + ], + "error": "execution reverted", + "gasUsed": "0x122cf" + } } + } ] diff --git a/integration/mainnet/eth_createAccessList/test_17.json b/integration/mainnet/eth_createAccessList/test_17.json index d12ffb4c..3dec4bc3 100644 --- a/integration/mainnet/eth_createAccessList/test_17.json +++ b/integration/mainnet/eth_createAccessList/test_17.json @@ -1,63 +1,69 @@ [ - { - "test": { - "reference": "https://etherscan.io/tx/0x4ee2bbfbaf90afef640c6ddacde696e6c4b81f9f644fc8c78670b910f922236b", - "description": "with contract deploy and accessList with to" - }, - "request": { - "jsonrpc": "2.0", - "method": "eth_createAccessList", - "params": [ - { - "from": "0x6c96769a08ddefa92e06de0a32089272c57f79b2", - "to": "0xbeefbabeea323f07c59926295205d3b7a17e8638", - "gas": "0x37696", - "gasPrice": "0x34bcab274", - "data": 
"0x000000020000000000000000000000000000000000000000000000000000000003e366320000000000000000000000000000000000000000000000005ea06407f0408000aaaebe6fe48e54f431b0c390cfaf0b017d09d42dc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000bb800000000000000000000000000000000000000000000000000210fa439a7fc04", - "value": "0xe4e1c0" - }, - "0xe4e1bf" - ], - "id": 1 - }, + { + "test": { + "id": "eth_createAccessList_at_block_130572_contract_deploy_and_access_list_with_to", + "reference": "https://etherscan.io/tx/0x4ee2bbfbaf90afef640c6ddacde696e6c4b81f9f644fc8c78670b910f922236b", + "description": "at block 130572 with contract deploy and access list with to", + "metadata": { "response": { - "id": 1, - "jsonrpc": "2.0", - "result": { - "accessList": [ - { - "address": "0x06729eb2424da47898f935267bd4a62940de5105", - "storageKeys": [ - "0x0000000000000000000000000000000000000000000000000000000000000009", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x6fac4587033fa036e2426ad0134940d3a683633cc2b2b7862f39195344f163d1", - "0x0000000000000000000000000000000000000000000000000000000000000008" - ] - }, - { - "address": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", - "storageKeys": [ - "0x8252275f333a87e99ed1ff1ad4defca3d32a065a9db2647c9014d94552d0acef", - "0x6157f0620f35ab12aef2698523b19b146ca18cf8dd91817b7cc7546b0d6b5250" - ] - }, - { - "address": "0xaaaebe6fe48e54f431b0c390cfaf0b017d09d42d", - "storageKeys": [ - "0x182d99ea001f7ef52b8283cf7e854519982d8ff86d31c77d3e98e34310ace7f2", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x29d00d6fa75b3651b1a634cd73ec75adaba7e9751b58877e0b97ffe826928fcb" - ] - }, - { - "address": "0x829bd824b016326a401d083b33d092293333a830", - "storageKeys": [] - } - ], - "gasUsed": "0x1b57b" - } + "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": 
["SET"]}] } + } + }, + "request": { + "jsonrpc": "2.0", + "method": "eth_createAccessList", + "params": [ + { + "from": "0x6c96769a08ddefa92e06de0a32089272c57f79b2", + "to": "0xbeefbabeea323f07c59926295205d3b7a17e8638", + "gas": "0x37696", + "gasPrice": "0x34bcab274", + "data": "0x000000020000000000000000000000000000000000000000000000000000000003e366320000000000000000000000000000000000000000000000005ea06407f0408000aaaebe6fe48e54f431b0c390cfaf0b017d09d42dc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000bb800000000000000000000000000000000000000000000000000210fa439a7fc04", + "value": "0xe4e1c0" + }, + "0xe4e1bf" + ], + "id": 1 + }, + "response": { + "id": 1, + "jsonrpc": "2.0", + "result": { + "accessList": [ + { + "address": "0x06729eb2424da47898f935267bd4a62940de5105", + "storageKeys": [ + "0x0000000000000000000000000000000000000000000000000000000000000009", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x6fac4587033fa036e2426ad0134940d3a683633cc2b2b7862f39195344f163d1", + "0x0000000000000000000000000000000000000000000000000000000000000008" + ] + }, + { + "address": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", + "storageKeys": [ + "0x8252275f333a87e99ed1ff1ad4defca3d32a065a9db2647c9014d94552d0acef", + "0x6157f0620f35ab12aef2698523b19b146ca18cf8dd91817b7cc7546b0d6b5250" + ] + }, + { + "address": "0xaaaebe6fe48e54f431b0c390cfaf0b017d09d42d", + "storageKeys": [ + "0x182d99ea001f7ef52b8283cf7e854519982d8ff86d31c77d3e98e34310ace7f2", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x29d00d6fa75b3651b1a634cd73ec75adaba7e9751b58877e0b97ffe826928fcb" + ] + }, + { + "address": "0x829bd824b016326a401d083b33d092293333a830", + "storageKeys": [] + } + ], + "gasUsed": "0x1b57b" + } } + } ] \ No newline at end of file From 9d6e29b04301315597ec2c01747fd1bbacaabc89 Mon Sep 17 00:00:00 2001 
From: canepat <16927169+canepat@users.noreply.github.com> Date: Thu, 18 Dec 2025 11:25:22 +0100 Subject: [PATCH 17/87] integration: command-line option for JSON diff tool --- cmd/integration/main.go | 52 +++++++++++++++++++++++++++-------------- 1 file changed, 35 insertions(+), 17 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 7dc728a4..a8cb0404 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -481,7 +481,23 @@ const ( DiffTool ) -var jsonDiffKind = JdLibrary +func (k JsonDiffKind) String() string { + return [...]string{"jd", "json-diff", "diff"}[k] +} + +// ParseJsonDiffKind converts a string into a JsonDiffKind enum type +func ParseJsonDiffKind(s string) (JsonDiffKind, error) { + switch strings.ToLower(s) { + case "jd": + return JdLibrary, nil + case "json-diff": + return JsonDiffTool, nil + case "diff": + return DiffTool, nil + default: + return JdLibrary, fmt.Errorf("invalid JsonDiffKind value: %s", s) + } +} type Config struct { ExitOnFail bool @@ -509,7 +525,7 @@ type Config struct { DisplayOnlyFail bool TransportType string Parallel bool - UseJSONDiff bool + DiffKind JsonDiffKind WithoutCompareResults bool WaitingTime int DoNotCompareError bool @@ -578,7 +594,7 @@ func NewConfig() *Config { DisplayOnlyFail: false, TransportType: "http", Parallel: true, - UseJSONDiff: true, + DiffKind: JdLibrary, WithoutCompareResults: false, WaitingTime: 0, DoNotCompareError: false, @@ -657,8 +673,8 @@ func (c *Config) parseFlags() error { excludeTestList := flag.String("X", "", "exclude test list") flag.StringVar(excludeTestList, "exclude-test-list", "", "exclude test list") - jsonDiff := flag.Bool("j", true, "use json-diff") - flag.BoolVar(jsonDiff, "json-diff", true, "use json-diff") + diffKind := flag.String("j", c.DiffKind.String(), "diff for JSON values, one of: jd, json-diff, diff") + flag.StringVar(diffKind, "json-diff", c.DiffKind.String(), "diff for JSON values, one of: jd, json-diff, diff") waitingTime := 
flag.Int("w", 0, "waiting time in milliseconds") flag.IntVar(waitingTime, "waiting-time", 0, "waiting time in milliseconds") @@ -720,7 +736,6 @@ func (c *Config) parseFlags() error { c.ExcludeTestList = *excludeTestList c.StartTest = *startTest c.TransportType = *transportType - c.UseJSONDiff = *jsonDiff c.WaitingTime = *waitingTime c.ForceDumpJSONs = *dumpResponse c.WithoutCompareResults = *withoutCompare @@ -731,6 +746,12 @@ func (c *Config) parseFlags() error { c.MemProfile = *memProfile c.TraceFile = *traceFile + kind, err := ParseJsonDiffKind(*diffKind) + if err != nil { + return err + } + c.DiffKind = kind + if *daemonPort { c.DaemonUnderTest = DaemonOnOtherPort } @@ -744,7 +765,6 @@ func (c *Config) parseFlags() error { if *compareErigon { c.VerifyWithDaemon = true c.DaemonAsReference = DaemonOnDefaultPort - c.UseJSONDiff = true } if *createJWT != "" { @@ -1368,19 +1388,19 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t } } -func runCompare(useJSONDiff bool, errorFile, tempFile1, tempFile2, diffFile string) bool { +func runCompare(jsonDiff bool, errorFile, tempFile1, tempFile2, diffFile string) bool { var cmd *exec.Cmd alreadyFailed := false - if useJSONDiff { + if jsonDiff { // Check if json-diff is available checkCmd := exec.Command("json-diff", "--help") if err := checkCmd.Run(); err != nil { - useJSONDiff = false + jsonDiff = false } } - if useJSONDiff { + if jsonDiff { cmd = exec.Command("sh", "-c", fmt.Sprintf("json-diff -s %s %s > %s 2> %s", tempFile2, tempFile1, diffFile, errorFile)) alreadyFailed = false } else { @@ -1513,8 +1533,8 @@ func extractJsonCommands(jsonFilename string) ([]JSONRPCCommand, error) { return jsonrpcCommands, nil } -func (c *JSONRPCCommand) compareJSONFiles(errorFileName, fileName1, fileName2, diffFileName string) (bool, error) { - switch jsonDiffKind { +func (c *JSONRPCCommand) compareJSONFiles(kind JsonDiffKind, errorFileName, fileName1, fileName2, diffFileName string) (bool, error) { + 
switch kind { case JdLibrary: jsonNode1, err := jd.ReadJsonFile(fileName1) if err != nil { @@ -1555,7 +1575,7 @@ func (c *JSONRPCCommand) compareJSONFiles(errorFileName, fileName1, fileName2, d } return true, nil default: - return false, fmt.Errorf("unknown JSON diff kind: %d", jsonDiffKind) + return false, fmt.Errorf("unknown JSON diff kind: %d", kind) } } @@ -1614,7 +1634,7 @@ func (c *JSONRPCCommand) compareJSON(config *Config, response interface{}, jsonF } } - diffResult, err := c.compareJSONFiles(errorFile, tempFile1, tempFile2, diffFile) + diffResult, err := c.compareJSONFiles(config.DiffKind, errorFile, tempFile1, tempFile2, diffFile) diffFileSize := int64(0) if diffResult { @@ -1925,7 +1945,6 @@ func runMain() int { defer pprof.StopCPUProfile() } - // Execution trace if config.TraceFile != "" { f, err := os.Create(config.TraceFile) if err != nil { @@ -1943,7 +1962,6 @@ func runMain() int { defer trace.Stop() } - // Memory profiling at end defer func() { if config.MemProfile != "" { f, err := os.Create(config.MemProfile) From d7c63ad9312e6cc9c4d8036573ab00919056f729 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Thu, 18 Dec 2025 11:39:28 +0100 Subject: [PATCH 18/87] integration: small refactoring --- cmd/integration/main.go | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index a8cb0404..e448e14d 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1234,7 +1234,7 @@ func validateJsonRpcResponse(response any) error { return nil } -func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, target string, verboseLevel int) (any, error) { +func executeRequest(ctx context.Context, config *Config, transportType, jwtAuth, requestDumps, target string) (any, error) { if transportType == "http" || transportType == "http_comp" || transportType == "https" { headers := map[string]string{ 
"Content-Type": "application/json", @@ -1261,7 +1261,7 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t req, err := http.NewRequestWithContext(ctx, "POST", targetURL, bytes.NewBufferString(requestDumps)) if err != nil { - if verboseLevel > 0 { + if config.VerboseLevel > 0 { fmt.Printf("\nhttp request creation fail: %s %v\n", targetURL, err) } return nil, err @@ -1274,11 +1274,11 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t start := time.Now() resp, err := client.Do(req) elapsed := time.Since(start) - if verboseLevel > 1 { + if config.VerboseLevel > 1 { fmt.Printf("http round-trip time: %v\n", elapsed) } if err != nil { - if verboseLevel > 0 { + if config.VerboseLevel > 0 { fmt.Printf("\nhttp connection fail: %s %v\n", targetURL, err) } return nil, err @@ -1291,28 +1291,27 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t }(resp.Body) if resp.StatusCode != http.StatusOK { - if verboseLevel > 1 { + if config.VerboseLevel > 1 { fmt.Printf("\npost result status_code: %d\n", resp.StatusCode) } - // TODO: add option to stop on any HTTP error? 
return nil, fmt.Errorf("http status %v", resp.Status) } body, err := io.ReadAll(resp.Body) if err != nil { - if verboseLevel > 0 { + if config.VerboseLevel > 0 { fmt.Printf("\nfailed to read response body: %v\n", err) } return nil, err } - if verboseLevel > 1 { + if config.VerboseLevel > 1 { fmt.Printf("\nhttp response body: %s\n", string(body)) } var result any if err = json.Unmarshal(body, &result); err != nil { - if verboseLevel > 0 { + if config.VerboseLevel > 0 { fmt.Printf("\nfailed to parse JSON: %v\n", err) } return nil, err @@ -1322,7 +1321,7 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t return nil, err } - if verboseLevel > 1 { + if config.VerboseLevel > 1 { fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, requestDumps, result) } @@ -1341,7 +1340,7 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t conn, _, err := dialer.Dial(wsTarget, headers) if err != nil { - if verboseLevel > 0 { + if config.VerboseLevel > 0 { fmt.Printf("\nwebsocket connection fail: %v\n", err) } return nil, err @@ -1354,7 +1353,7 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t }(conn) if err = conn.WriteMessage(websocket.TextMessage, []byte(requestDumps)); err != nil { - if verboseLevel > 0 { + if config.VerboseLevel > 0 { fmt.Printf("\nwebsocket write fail: %v\n", err) } return nil, err @@ -1362,7 +1361,7 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t _, message, err := conn.ReadMessage() if err != nil { - if verboseLevel > 0 { + if config.VerboseLevel > 0 { fmt.Printf("\nwebsocket read fail: %v\n", err) } return nil, err @@ -1370,7 +1369,7 @@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t var result any if err = json.Unmarshal(message, &result); err != nil { - if verboseLevel > 0 { + if config.VerboseLevel > 0 { fmt.Printf("\nfailed to parse JSON: %v\n", err) } return nil, err @@ -1380,7 +1379,7 
@@ func executeRequest(ctx context.Context, transportType, jwtAuth, requestDumps, t return nil, err } - if verboseLevel > 1 { + if config.VerboseLevel > 1 { fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, requestDumps, result) } @@ -1815,7 +1814,7 @@ func (c *JSONRPCCommand) run(ctx context.Context, config *Config, descriptor *Te diffFile := outputAPIFilename + "-diff.json" if !config.VerifyWithDaemon { - result, err := executeRequest(ctx, transportType, jwtAuth, string(requestDumps), target, config.VerboseLevel) + result, err := executeRequest(ctx, config, transportType, jwtAuth, string(requestDumps), target) if err != nil { return false, err } @@ -1834,7 +1833,7 @@ func (c *JSONRPCCommand) run(ctx context.Context, config *Config, descriptor *Te outputDirName, daemonFile, expRspFile, diffFile, descriptor) } else { target = getTarget(DaemonOnDefaultPort, method, config) - result, err := executeRequest(ctx, transportType, jwtAuth, string(requestDumps), target, config.VerboseLevel) + result, err := executeRequest(ctx, config, transportType, jwtAuth, string(requestDumps), target) if err != nil { return false, err } @@ -1845,7 +1844,7 @@ func (c *JSONRPCCommand) run(ctx context.Context, config *Config, descriptor *Te return false, errors.New("response is nil (maybe node at " + target + " is down?)") } target1 = getTarget(config.DaemonAsReference, method, config) - result1, err := executeRequest(ctx, transportType, jwtAuth, string(requestDumps), target1, config.VerboseLevel) + result1, err := executeRequest(ctx, config, transportType, jwtAuth, string(requestDumps), target1) if err != nil { return false, err } From fd71b3929c09456311ca4ac577573e7f91d7f8f7 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Thu, 18 Dec 2025 12:38:15 +0100 Subject: [PATCH 19/87] integration: small refactoring --- cmd/integration/main.go | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git 
a/cmd/integration/main.go b/cmd/integration/main.go index e448e14d..baee7130 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1170,17 +1170,14 @@ var ( errJsonRpcContainsBothResultAndError = errors.New("JSON-RPC 2.0 response contains both 'result' and 'error'") ) -// validateJsonRpcObject checks that the received response is a valid JSON-RPC object, according to 2.0 spec. -// This implies that the response must be a JSON object containing: +// validateJsonRpcObject checks that the passed object is a valid JSON-RPC object, according to 2.0 spec. +// This implies that it must be a JSON object containing: // - one mandatory "jsonrpc" field which must be equal to "2.0" // - one mandatory "id" field which must match the value of the same field in the request -// - either one "result" field in case of success or one "error" field otherwise, mutually exclusive -// The strict parameter relaxes the compliance requirements by allowing both 'result' and 'error' to be present -// TODO: strict parameter is required for corner cases in streaming mode when 'result' is emitted up-front // https://www.jsonrpc.org/specification -func validateJsonRpcObject(response map[string]any, strict bool) error { - // Ensure that the response is a valid JSON-RPC object. - jsonrpc, ok := response[jsonRpcTag] +func validateJsonRpcObject(object map[string]any) error { + // Ensure that the object is a valid JSON-RPC object. + jsonrpc, ok := object[jsonRpcTag] if !ok { return errJsonRpcMissingVersion } @@ -1188,10 +1185,25 @@ func validateJsonRpcObject(response map[string]any, strict bool) error { if jsonrpcVersion != "2.0" { return errJsonRpcNoncompliantVersion } - _, ok = response[identifierTag] + _, ok = object[identifierTag] if !ok { return errJsonRpcMissingId } + return nil +} + +// validateJsonRpcResponseObject checks that the passed response is a valid JSON-RPC response, according to 2.0 spec. 
+// This implies that the response must be a valid JSON-RPC object plus: +// - either one "result" field in case of success or one "error" field otherwise, mutually exclusive +// The strict parameter relaxes the compliance requirements by allowing both 'result' and 'error' to be present +// TODO: strict parameter is required for corner cases in streaming mode when 'result' is emitted up-front +// https://www.jsonrpc.org/specification +func validateJsonRpcResponseObject(response map[string]any, strict bool) error { + // Ensure that the response is a valid JSON-RPC object. + err := validateJsonRpcObject(response) + if err != nil { + return err + } _, hasResult := response[resultTag] _, hasError := response[errorTag] if !hasResult && !hasError { @@ -1214,7 +1226,7 @@ func validateJsonRpcResponse(response any) error { } if isMap { // Ensure that the response is a valid JSON-RPC object. - err := validateJsonRpcObject(responseAsMap, false) + err := validateJsonRpcResponseObject(responseAsMap, false) if err != nil { return err } @@ -1225,7 +1237,7 @@ func validateJsonRpcResponse(response any) error { if !isElementMap { return errJsonRpcUnexpectedFormat } - err := validateJsonRpcObject(elementAsMap, false) + err := validateJsonRpcResponseObject(elementAsMap, false) if err != nil { return err } From d375b1f98252cfcc6b86a1aef6e91b562ba40466 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Thu, 18 Dec 2025 17:29:26 +0100 Subject: [PATCH 20/87] integration: extract ResultCollector type --- cmd/integration/main.go | 127 ++++++++++++++++++++++------------------ 1 file changed, 70 insertions(+), 57 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index baee7130..33390d88 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1536,10 +1536,10 @@ func extractJsonCommands(jsonFilename string) ([]JSONRPCCommand, error) { var jsonrpcCommands []JSONRPCCommand data, err := os.ReadFile(jsonFilename) if err 
!= nil { - return jsonrpcCommands, errors.New("cannot read file " + jsonFilename) + return jsonrpcCommands, errors.New("cannot read file " + jsonFilename + ": " + err.Error()) } if err := json.Unmarshal(data, &jsonrpcCommands); err != nil { - return jsonrpcCommands, errors.New("cannot parse JSON " + jsonFilename) + return jsonrpcCommands, errors.New("cannot parse JSON " + jsonFilename + ": " + err.Error()) } return jsonrpcCommands, nil } @@ -1925,6 +1925,60 @@ func mustAtoi(s string) int { return n } +type ResultCollector struct { + resultsChan chan chan TestResult + config *Config + successTests int + failedTests int + executedTests int +} + +func newResultCollector(resultsChan chan chan TestResult, config *Config) *ResultCollector { + return &ResultCollector{resultsChan: resultsChan, config: config} +} + +func (c *ResultCollector) start(ctx context.Context, cancelCtx context.CancelFunc, resultsWg *sync.WaitGroup) { + go func() { + defer resultsWg.Done() + for { + select { + case testResultCh := <-c.resultsChan: + if testResultCh == nil { + return + } + select { + case result := <-testResultCh: + file := fmt.Sprintf("%-60s", result.Test.Name) + tt := fmt.Sprintf("%-15s", result.Test.TransportType) + fmt.Printf("%04d. %s::%s ", result.Test.Number, tt, file) + + if result.Success { + c.successTests++ + if c.config.VerboseLevel > 0 { + fmt.Println("OK") + } else { + fmt.Print("OK\r") + } + } else { + c.failedTests++ + fmt.Printf("failed: %s\n", result.Error.Error()) + if c.config.ExitOnFail { + // Signal other tasks to stop and exit + cancelCtx() + return + } + } + c.executedTests++ + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + }() +} + func runMain() int { // Create a channel to receive OS signals and register for clean termination signals. 
sigs := make(chan os.Signal, 1) @@ -2007,9 +2061,6 @@ func runMain() int { } scheduledTests := 0 - executedTests := 0 - failedTests := 0 - successTests := 0 skippedTests := 0 var serverEndpoints string @@ -2080,45 +2131,8 @@ func runMain() int { // Results collector var resultsWg sync.WaitGroup resultsWg.Add(1) - go func() { - defer resultsWg.Done() - for { - select { - case testResultCh := <-resultsChan: - if testResultCh == nil { - return - } - select { - case result := <-testResultCh: - file := fmt.Sprintf("%-60s", result.Test.Name) - tt := fmt.Sprintf("%-15s", result.Test.TransportType) - fmt.Printf("%04d. %s::%s ", result.Test.Number, tt, file) - - if result.Success { - successTests++ - if config.VerboseLevel > 0 { - fmt.Println("OK") - } else { - fmt.Print("OK\r") - } - } else { - failedTests++ - fmt.Printf("failed: %s\n", result.Error.Error()) - if config.ExitOnFail { - // Signal other tasks to stop and exit - cancelCtx() - return - } - } - executedTests++ - case <-ctx.Done(): - return - } - case <-ctx.Done(): - return - } - } - }() + resultsCollector := newResultCollector(resultsChan, config) + resultsCollector.start(ctx, cancelCtx, &resultsWg) go func() { for { @@ -2281,15 +2295,6 @@ func runMain() int { } } } - - if config.ExitOnFail && failedTests > 0 { - fmt.Println("WARN: test sequence interrupted by failure (ExitOnFail)") - break - } - } - - if scheduledTests == 0 && config.TestingAPIsWith != "" { - fmt.Printf("WARN: API filter %s selected no tests\n", config.TestingAPIsWith) } // Close channels and wait for completion @@ -2298,6 +2303,14 @@ func runMain() int { close(resultsChan) resultsWg.Wait() + if scheduledTests == 0 && config.TestingAPIsWith != "" { + fmt.Printf("WARN: API filter %s selected no tests\n", config.TestingAPIsWith) + } + + if config.ExitOnFail && resultsCollector.failedTests > 0 { + fmt.Println("WARN: test sequence interrupted by failure (ExitOnFail)") + } + // Clean empty subfolders in the output dir if entries, err := 
os.ReadDir(config.OutputDir); err == nil { for _, entry := range entries { @@ -2329,11 +2342,11 @@ func runMain() int { fmt.Printf("Test suite total tests: %d\n", globalTestNumber) fmt.Printf("Number of skipped tests: %d\n", skippedTests) fmt.Printf("Number of selected tests: %d\n", scheduledTests) - fmt.Printf("Number of executed tests: %d\n", executedTests) - fmt.Printf("Number of success tests: %d\n", successTests) - fmt.Printf("Number of failed tests: %d\n", failedTests) + fmt.Printf("Number of executed tests: %d\n", resultsCollector.executedTests) + fmt.Printf("Number of success tests: %d\n", resultsCollector.successTests) + fmt.Printf("Number of failed tests: %d\n", resultsCollector.failedTests) - if failedTests > 0 { + if resultsCollector.failedTests > 0 { return 1 } return 0 From 47ac5949ebc8a55819c49e7c895ed87a616d0a03 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 19 Dec 2025 10:20:27 +0100 Subject: [PATCH 21/87] integration: refactor and reduce marshal/unmarshal --- cmd/integration/main.go | 119 +++++++++++++++++----------------------- 1 file changed, 50 insertions(+), 69 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 33390d88..a02dfe1c 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -567,9 +567,9 @@ type JsonRpcTest struct { } type JSONRPCCommand struct { - Request interface{} `json:"request"` - Response interface{} `json:"response"` - TestInfo *JsonRpcTest `json:"test"` + Request json.RawMessage `json:"request"` + Response json.RawMessage `json:"response"` + TestInfo *JsonRpcTest `json:"test"` } func NewConfig() *Config { @@ -1103,7 +1103,7 @@ func isNotComparedError(testName, net string) bool { return false } -func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response, expectedResponse interface{}) error { +func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response, expectedResponse []byte) error { if 
!dumpJSON { return nil } @@ -1121,12 +1121,7 @@ func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response return err } } - data, err := json.MarshalIndent(response, "", " ") - if err != nil { - fmt.Printf("Error marshaling daemon response: %v\n", err) - continue - } - if err := os.WriteFile(daemonFile, data, 0644); err != nil { + if err := os.WriteFile(daemonFile, response, 0644); err != nil { fmt.Printf("Exception on file write daemon: %v attempt %d\n", err, attempt) continue } @@ -1139,12 +1134,7 @@ func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response return err } } - data, err := json.MarshalIndent(expectedResponse, "", " ") - if err != nil { - fmt.Printf("Error marshaling expected response: %v\n", err) - continue - } - if err := os.WriteFile(expRspFile, data, 0644); err != nil { + if err := os.WriteFile(expRspFile, expectedResponse, 0644); err != nil { fmt.Printf("Exception on file write expected: %v attempt %d\n", err, attempt) continue } @@ -1206,7 +1196,7 @@ func validateJsonRpcResponseObject(response map[string]any, strict bool) error { } _, hasResult := response[resultTag] _, hasError := response[errorTag] - if !hasResult && !hasError { + if strict && !hasResult && !hasError { return errJsonRpcMissingResultOrError } if strict && hasResult && hasError { @@ -1246,7 +1236,7 @@ func validateJsonRpcResponse(response any) error { return nil } -func executeRequest(ctx context.Context, config *Config, transportType, jwtAuth, requestDumps, target string) (any, error) { +func executeRequest(ctx context.Context, config *Config, transportType, jwtAuth, requestDumps, target string) ([]byte, error) { if transportType == "http" || transportType == "http_comp" || transportType == "https" { headers := map[string]string{ "Content-Type": "application/json", @@ -1321,23 +1311,11 @@ func executeRequest(ctx context.Context, config *Config, transportType, jwtAuth, fmt.Printf("\nhttp response body: %s\n", string(body)) } - var 
result any - if err = json.Unmarshal(body, &result); err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nfailed to parse JSON: %v\n", err) - } - return nil, err - } - err = validateJsonRpcResponse(result) - if err != nil { - return nil, err - } - if config.VerboseLevel > 1 { - fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, requestDumps, result) + fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, requestDumps, string(body)) } - return result, nil + return body, nil } else { // WebSocket wsTarget := "ws://" + target @@ -1379,23 +1357,11 @@ func executeRequest(ctx context.Context, config *Config, transportType, jwtAuth, return nil, err } - var result any - if err = json.Unmarshal(message, &result); err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nfailed to parse JSON: %v\n", err) - } - return nil, err - } - err = validateJsonRpcResponse(result) - if err != nil { - return nil, err - } - if config.VerboseLevel > 1 { - fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, requestDumps, result) + fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, requestDumps, string(message)) } - return result, nil + return message, nil } } @@ -1682,11 +1648,11 @@ func (c *JSONRPCCommand) compareJSON(config *Config, response interface{}, jsonF return true, nil } -func (c *JSONRPCCommand) processResponse(response, result1 any, responseInFile interface{}, config *Config, outputDir, daemonFile, expRspFile, diffFile string, descriptor *TestDescriptor) (bool, error) { +func (c *JSONRPCCommand) processResponse(response, result1, responseInFile []byte, config *Config, outputDir, daemonFile, expRspFile, diffFile string, descriptor *TestDescriptor) (bool, error) { jsonFile := descriptor.Name testNumber := descriptor.Number - var expectedResponse interface{} + var expectedResponse []byte if result1 != nil { expectedResponse = result1 } else { @@ -1701,12 +1667,35 @@ func (c *JSONRPCCommand) processResponse(response, result1 any, 
responseInFile i return true, nil } - // Deep comparison between the received response and the expected response - respJSON, _ := json.Marshal(response) - expJSON, _ := json.Marshal(expectedResponse) + var responseMap map[string]interface{} + var respIsMap bool + if err := json.Unmarshal(response, &responseMap); err == nil { + respIsMap = true + response, err = json.Marshal(responseMap) + if err != nil { + return false, err + } + err = validateJsonRpcResponse(responseMap) + if err != nil { + return false, err + } + } + var expectedMap map[string]interface{} + var expIsMap bool + if err := json.Unmarshal(expectedResponse, &expectedMap); err == nil { + expIsMap = true + expectedResponse, err = json.Marshal(expectedMap) + if err != nil { + return false, err + } + err = validateJsonRpcResponse(expectedMap) + if err != nil { + return false, err + } + } // Fast path: if actual/expected are identical byte-wise, no need to compare them - if bytes.Equal(respJSON, expJSON) { + if bytes.Equal(response, expectedResponse) { err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { return false, err @@ -1715,10 +1704,7 @@ func (c *JSONRPCCommand) processResponse(response, result1 any, responseInFile i } // Check various conditions where we don't care about differences - responseMap, respIsMap := response.(map[string]interface{}) - expectedMap, expIsMap := expectedResponse.(map[string]interface{}) - - if respIsMap && expIsMap { + if respIsMap && expIsMap { // TODO: extract function ignoreDifferences and handle JSON batch responses _, responseHasResult := responseMap["result"] expectedResult, expectedHasResult := expectedMap["result"] _, responseHasError := responseMap["error"] @@ -1754,16 +1740,17 @@ func (c *JSONRPCCommand) processResponse(response, result1 any, responseInFile i } } + // We need to compare the response and expectedResponse, so we dump them to files first err := dumpJSONs(true, daemonFile, expRspFile, 
outputDir, response, expectedResponse) if err != nil { return false, err } - same, err := c.compareJSON(config, response, jsonFile, daemonFile, expRspFile, diffFile, testNumber) + same, err := c.compareJSON(config, responseMap, jsonFile, daemonFile, expRspFile, diffFile, testNumber) if err != nil { return same, err } - if same { + if same && !config.ForceDumpJSONs { err := os.Remove(daemonFile) if err != nil { return false, err @@ -1778,10 +1765,6 @@ func (c *JSONRPCCommand) processResponse(response, result1 any, responseInFile i } } - err = dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) - if err != nil { - return false, err - } return same, nil } @@ -1791,23 +1774,21 @@ func (c *JSONRPCCommand) run(ctx context.Context, config *Config, descriptor *Te request := c.Request method := "" - requestBytes, _ := json.Marshal(request) var requestMap map[string]interface{} - if err := json.Unmarshal(requestBytes, &requestMap); err == nil { + if err := json.Unmarshal(request, &requestMap); err == nil { if m, ok := requestMap["method"].(string); ok { method = m } } else { // Try an array of requests var requestArray []map[string]interface{} - if err := json.Unmarshal(requestBytes, &requestArray); err == nil && len(requestArray) > 0 { + if err := json.Unmarshal(request, &requestArray); err == nil && len(requestArray) > 0 { if m, ok := requestArray[0]["method"].(string); ok { method = m } } } - requestDumps, _ := json.Marshal(request) target := getTarget(config.DaemonUnderTest, method, config) target1 := "" @@ -1826,7 +1807,7 @@ func (c *JSONRPCCommand) run(ctx context.Context, config *Config, descriptor *Te diffFile := outputAPIFilename + "-diff.json" if !config.VerifyWithDaemon { - result, err := executeRequest(ctx, config, transportType, jwtAuth, string(requestDumps), target) + result, err := executeRequest(ctx, config, transportType, jwtAuth, string(request), target) if err != nil { return false, err } @@ -1845,7 +1826,7 @@ 
func (c *JSONRPCCommand) run(ctx context.Context, config *Config, descriptor *Te outputDirName, daemonFile, expRspFile, diffFile, descriptor) } else { target = getTarget(DaemonOnDefaultPort, method, config) - result, err := executeRequest(ctx, config, transportType, jwtAuth, string(requestDumps), target) + result, err := executeRequest(ctx, config, transportType, jwtAuth, string(request), target) if err != nil { return false, err } @@ -1856,7 +1837,7 @@ func (c *JSONRPCCommand) run(ctx context.Context, config *Config, descriptor *Te return false, errors.New("response is nil (maybe node at " + target + " is down?)") } target1 = getTarget(config.DaemonAsReference, method, config) - result1, err := executeRequest(ctx, config, transportType, jwtAuth, string(requestDumps), target1) + result1, err := executeRequest(ctx, config, transportType, jwtAuth, string(request), target1) if err != nil { return false, err } From 61d6de99be45f022c479ad59d5bbf8008f4dd7f7 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 19 Dec 2025 10:50:06 +0100 Subject: [PATCH 22/87] integration: avoid writing JSON file from archive --- cmd/integration/main.go | 101 ++++++++++++++++------------------------ 1 file changed, 40 insertions(+), 61 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index a02dfe1c..0498e946 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -374,7 +374,7 @@ func autodetectCompression(archivePath string, inFile *os.File) (string, error) } // extractArchive extracts a compressed or uncompressed tar archive. 
-func extractArchive(archivePath string, sanitizeExtension bool) ([]string, error) { +func extractArchive(archivePath string, sanitizeExtension bool) ([]JSONRPCCommand, error) { // Open the archive file inputFile, err := os.Open(archivePath) if err != nil { @@ -420,57 +420,52 @@ func extractArchive(archivePath string, sanitizeExtension bool) ([]string, error reader = inputFile } - // Iterate over files in the archive and extract them + var jsonrpcCommands []JSONRPCCommand + + // We expect the archive to contain a single JSON file tarReader := tar.NewReader(reader) - tmpFilePaths := []string{} + header, err := tarReader.Next() + if err == io.EOF { + return jsonrpcCommands, nil // Empty archive + } + if err != nil { + return nil, fmt.Errorf("failed to read tar header: %w", err) + } + if header.Typeflag != tar.TypeReg { + return nil, fmt.Errorf("archive must contain a single JSON file, found %s", header.Name) + } + + size := header.Size + size++ // one byte for final read at EOF + if size < 512 { + size = 512 + } + data := make([]byte, 0, size) for { - header, err := tarReader.Next() - if err == io.EOF { - break // End of archive - } + var n int + n, err = tarReader.Read(data[len(data):cap(data)]) + data = data[:len(data)+n] if err != nil { - return nil, fmt.Errorf("failed to read tar header: %w", err) - } - - targetPath := filepath.Dir(archivePath) + "/" + header.Name - - switch header.Typeflag { - case tar.TypeDir: - // Create directory - if err = os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil { - return nil, fmt.Errorf("failed to create directory %s: %w", targetPath, err) - } - case tar.TypeReg: - // Ensure the parent directory exists before creating the file - if err = os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil { - return nil, fmt.Errorf("failed to create parent directory for %s: %w", targetPath, err) - } - - // Create the file - outputFile, err := os.OpenFile(targetPath, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) - if err != nil 
{ - return nil, fmt.Errorf("failed to create file %s: %w", targetPath, err) + if err == io.EOF { + err = nil } + break + } - // Write content - if _, err = io.Copy(outputFile, tarReader); err != nil { - err = outputFile.Close() - if err != nil { - return nil, err - } - return nil, fmt.Errorf("failed to write file content for %s: %w", targetPath, err) - } - tmpFilePaths = append(tmpFilePaths, targetPath) - err = outputFile.Close() - if err != nil { - return nil, err - } - default: - fmt.Printf("WARN: skipping unsupported file type %c: %s\n", header.Typeflag, targetPath) + if len(data) >= cap(data) { + d := append(data[:cap(data)], 0) + data = d[:len(data)] } } + if err != nil { + return nil, fmt.Errorf("failed to read tar data: %w", err) + } + + if err := json.Unmarshal(data, &jsonrpcCommands); err != nil { + return jsonrpcCommands, errors.New("cannot parse JSON " + archivePath + ": " + err.Error()) + } - return tmpFilePaths, nil + return jsonrpcCommands, nil } type JsonDiffKind int @@ -1862,26 +1857,10 @@ func runTest(ctx context.Context, descriptor *TestDescriptor, config *Config) (b var jsonrpcCommands []JSONRPCCommand var err error if isArchive(jsonFilename) { - tempFilePaths, err := extractArchive(jsonFilename, config.SanitizeArchiveExt) + jsonrpcCommands, err = extractArchive(jsonFilename, config.SanitizeArchiveExt) if err != nil { return false, errors.New("cannot extract archive file " + jsonFilename) } - removeTempFiles := func() { - for _, path := range tempFilePaths { - err := os.Remove(path) - if err != nil { - fmt.Printf("failed to remove temp file %s: %v\n", path, err) - } - } - } - for _, tempFilePath := range tempFilePaths { - jsonrpcCommands, err = extractJsonCommands(tempFilePath) - if err != nil { - removeTempFiles() - return false, err - } - } - removeTempFiles() } else { jsonrpcCommands, err = extractJsonCommands(jsonFilename) if err != nil { From c37352fd3ec322caf79a3b04912a551c546fbb1e Mon Sep 17 00:00:00 2001 From: canepat 
<16927169+canepat@users.noreply.github.com> Date: Fri, 19 Dec 2025 10:56:24 +0100 Subject: [PATCH 23/87] integration: refactoring in extractArchive --- cmd/integration/main.go | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 0498e946..6facdc58 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -435,33 +435,7 @@ func extractArchive(archivePath string, sanitizeExtension bool) ([]JSONRPCComman return nil, fmt.Errorf("archive must contain a single JSON file, found %s", header.Name) } - size := header.Size - size++ // one byte for final read at EOF - if size < 512 { - size = 512 - } - data := make([]byte, 0, size) - for { - var n int - n, err = tarReader.Read(data[len(data):cap(data)]) - data = data[:len(data)+n] - if err != nil { - if err == io.EOF { - err = nil - } - break - } - - if len(data) >= cap(data) { - d := append(data[:cap(data)], 0) - data = d[:len(data)] - } - } - if err != nil { - return nil, fmt.Errorf("failed to read tar data: %w", err) - } - - if err := json.Unmarshal(data, &jsonrpcCommands); err != nil { + if err := json.NewDecoder(tarReader).Decode(&jsonrpcCommands); err != nil { return jsonrpcCommands, errors.New("cannot parse JSON " + archivePath + ": " + err.Error()) } From e9c979aaa030e8c1640a2b10c83dfd35e11b5c6a Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 19 Dec 2025 12:44:18 +0100 Subject: [PATCH 24/87] integration: better num workers for parallel exec --- cmd/integration/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 6facdc58..7fc719c8 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -2037,7 +2037,7 @@ func runMain() int { numWorkers := 1 if config.Parallel { - numWorkers = 10 // Adjust based on your needs + numWorkers = runtime.NumCPU() } ctx, cancelCtx := 
context.WithCancel(context.Background()) From e7b1dd98aa1fc2356ecd8dc20b28bf96c54671cd Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 19 Dec 2025 13:06:49 +0100 Subject: [PATCH 25/87] integration: avoid unnecessary bytes/string conversions integration: refactor executeRequest implementation --- cmd/integration/main.go | 209 +++++++++++++++++++++------------------- 1 file changed, 108 insertions(+), 101 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 7fc719c8..8beff39f 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1205,133 +1205,140 @@ func validateJsonRpcResponse(response any) error { return nil } -func executeRequest(ctx context.Context, config *Config, transportType, jwtAuth, requestDumps, target string) ([]byte, error) { - if transportType == "http" || transportType == "http_comp" || transportType == "https" { - headers := map[string]string{ - "Content-Type": "application/json", - } +func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtAuth, target string, request []byte) ([]byte, error) { + headers := map[string]string{ + "Content-Type": "application/json", + } - if transportType != "http_comp" { - headers["Accept-Encoding"] = "Identity" - } + if transportType != "http_comp" { + headers["Accept-Encoding"] = "Identity" + } - if jwtAuth != "" { - headers["Authorization"] = jwtAuth - } + if jwtAuth != "" { + headers["Authorization"] = jwtAuth + } - targetURL := target - if transportType == "https" { - targetURL = "https://" + target - } else { - targetURL = "http://" + target - } + targetURL := target + if transportType == "https" { + targetURL = "https://" + target + } else { + targetURL = "http://" + target + } - client := &http.Client{ - Timeout: 300 * time.Second, - } + client := &http.Client{ + Timeout: 300 * time.Second, + } - req, err := http.NewRequestWithContext(ctx, "POST", targetURL, bytes.NewBufferString(requestDumps)) - 
if err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nhttp request creation fail: %s %v\n", targetURL, err) - } - return nil, err + req, err := http.NewRequestWithContext(ctx, "POST", targetURL, bytes.NewBuffer(request)) + if err != nil { + if config.VerboseLevel > 0 { + fmt.Printf("\nhttp request creation fail: %s %v\n", targetURL, err) } + return nil, err + } - for k, v := range headers { - req.Header.Set(k, v) - } + for k, v := range headers { + req.Header.Set(k, v) + } - start := time.Now() - resp, err := client.Do(req) - elapsed := time.Since(start) - if config.VerboseLevel > 1 { - fmt.Printf("http round-trip time: %v\n", elapsed) + start := time.Now() + resp, err := client.Do(req) + elapsed := time.Since(start) + if config.VerboseLevel > 1 { + fmt.Printf("http round-trip time: %v\n", elapsed) + } + if err != nil { + if config.VerboseLevel > 0 { + fmt.Printf("\nhttp connection fail: %s %v\n", targetURL, err) } + return nil, err + } + defer func(Body io.ReadCloser) { + err := Body.Close() if err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nhttp connection fail: %s %v\n", targetURL, err) - } - return nil, err + fmt.Printf("\nfailed to close response body: %v\n", err) } - defer func(Body io.ReadCloser) { - err := Body.Close() - if err != nil { - fmt.Printf("\nfailed to close response body: %v\n", err) - } - }(resp.Body) + }(resp.Body) - if resp.StatusCode != http.StatusOK { - if config.VerboseLevel > 1 { - fmt.Printf("\npost result status_code: %d\n", resp.StatusCode) - } - return nil, fmt.Errorf("http status %v", resp.Status) + if resp.StatusCode != http.StatusOK { + if config.VerboseLevel > 1 { + fmt.Printf("\npost result status_code: %d\n", resp.StatusCode) } + return nil, fmt.Errorf("http status %v", resp.Status) + } - body, err := io.ReadAll(resp.Body) - if err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nfailed to read response body: %v\n", err) - } - return nil, err + body, err := io.ReadAll(resp.Body) + if err != nil { + if 
config.VerboseLevel > 0 { + fmt.Printf("\nfailed to read response body: %v\n", err) } + return nil, err + } - if config.VerboseLevel > 1 { - fmt.Printf("\nhttp response body: %s\n", string(body)) - } + if config.VerboseLevel > 1 { + fmt.Printf("\nhttp response body: %s\n", string(body)) + } - if config.VerboseLevel > 1 { - fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, requestDumps, string(body)) - } + if config.VerboseLevel > 1 { + fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, request, string(body)) + } - return body, nil - } else { - // WebSocket - wsTarget := "ws://" + target - dialer := websocket.Dialer{ - HandshakeTimeout: 300 * time.Second, - } + return body, nil +} - headers := http.Header{} - if jwtAuth != "" { - headers.Set("Authorization", jwtAuth) - } +func executeWebSocketRequest(config *Config, transportType, jwtAuth, target string, request []byte) ([]byte, error) { + wsTarget := "ws://" + target + dialer := websocket.Dialer{ + HandshakeTimeout: 300 * time.Second, + EnableCompression: strings.HasSuffix(transportType, "_comp"), + } - conn, _, err := dialer.Dial(wsTarget, headers) + headers := http.Header{} + if jwtAuth != "" { + headers.Set("Authorization", jwtAuth) + } + + conn, _, err := dialer.Dial(wsTarget, headers) + if err != nil { + if config.VerboseLevel > 0 { + fmt.Printf("\nwebsocket connection fail: %v\n", err) + } + return nil, err + } + defer func(conn *websocket.Conn) { + err := conn.Close() if err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nwebsocket connection fail: %v\n", err) - } - return nil, err + fmt.Printf("\nfailed to close websocket connection: %v\n", err) } - defer func(conn *websocket.Conn) { - err := conn.Close() - if err != nil { - fmt.Printf("\nfailed to close websocket connection: %v\n", err) - } - }(conn) + }(conn) - if err = conn.WriteMessage(websocket.TextMessage, []byte(requestDumps)); err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nwebsocket write fail: %v\n", err) - } 
- return nil, err + if err = conn.WriteMessage(websocket.BinaryMessage, request); err != nil { + if config.VerboseLevel > 0 { + fmt.Printf("\nwebsocket write fail: %v\n", err) } + return nil, err + } - _, message, err := conn.ReadMessage() - if err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nwebsocket read fail: %v\n", err) - } - return nil, err + _, message, err := conn.ReadMessage() + if err != nil { + if config.VerboseLevel > 0 { + fmt.Printf("\nwebsocket read fail: %v\n", err) } + return nil, err + } - if config.VerboseLevel > 1 { - fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, requestDumps, string(message)) - } + if config.VerboseLevel > 1 { + fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, request, string(message)) + } + + return message, nil +} - return message, nil +func executeRequest(ctx context.Context, config *Config, transportType, jwtAuth, target string, request []byte) ([]byte, error) { + if strings.HasPrefix(transportType, "http") { + return executeHttpRequest(ctx, config, transportType, jwtAuth, target, request) } + return executeWebSocketRequest(config, transportType, jwtAuth, target, request) } func runCompare(jsonDiff bool, errorFile, tempFile1, tempFile2, diffFile string) bool { @@ -1776,7 +1783,7 @@ func (c *JSONRPCCommand) run(ctx context.Context, config *Config, descriptor *Te diffFile := outputAPIFilename + "-diff.json" if !config.VerifyWithDaemon { - result, err := executeRequest(ctx, config, transportType, jwtAuth, string(request), target) + result, err := executeRequest(ctx, config, transportType, jwtAuth, target, request) if err != nil { return false, err } @@ -1795,7 +1802,7 @@ func (c *JSONRPCCommand) run(ctx context.Context, config *Config, descriptor *Te outputDirName, daemonFile, expRspFile, diffFile, descriptor) } else { target = getTarget(DaemonOnDefaultPort, method, config) - result, err := executeRequest(ctx, config, transportType, jwtAuth, string(request), target) + result, err := 
executeRequest(ctx, config, transportType, jwtAuth, target, request) if err != nil { return false, err } @@ -1806,7 +1813,7 @@ func (c *JSONRPCCommand) run(ctx context.Context, config *Config, descriptor *Te return false, errors.New("response is nil (maybe node at " + target + " is down?)") } target1 = getTarget(config.DaemonAsReference, method, config) - result1, err := executeRequest(ctx, config, transportType, jwtAuth, string(request), target1) + result1, err := executeRequest(ctx, config, transportType, jwtAuth, target1, request) if err != nil { return false, err } From 945f408b2a7720ca83a64d8064ae9cff7530596e Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 19 Dec 2025 13:11:58 +0100 Subject: [PATCH 26/87] integration: renaming --- cmd/integration/main.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 8beff39f..8171ed90 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -374,7 +374,7 @@ func autodetectCompression(archivePath string, inFile *os.File) (string, error) } // extractArchive extracts a compressed or uncompressed tar archive. 
-func extractArchive(archivePath string, sanitizeExtension bool) ([]JSONRPCCommand, error) { +func extractArchive(archivePath string, sanitizeExtension bool) ([]JsonRpcCommand, error) { // Open the archive file inputFile, err := os.Open(archivePath) if err != nil { @@ -420,7 +420,7 @@ func extractArchive(archivePath string, sanitizeExtension bool) ([]JSONRPCComman reader = inputFile } - var jsonrpcCommands []JSONRPCCommand + var jsonrpcCommands []JsonRpcCommand // We expect the archive to contain a single JSON file tarReader := tar.NewReader(reader) @@ -535,7 +535,7 @@ type JsonRpcTest struct { Metadata *JsonRpcTestMetadata `json:"metadata"` } -type JSONRPCCommand struct { +type JsonRpcCommand struct { Request json.RawMessage `json:"request"` Response json.RawMessage `json:"response"` TestInfo *JsonRpcTest `json:"test"` @@ -1474,8 +1474,8 @@ func isArchive(jsonFilename string) bool { return !strings.HasSuffix(jsonFilename, ".json") } -func extractJsonCommands(jsonFilename string) ([]JSONRPCCommand, error) { - var jsonrpcCommands []JSONRPCCommand +func extractJsonCommands(jsonFilename string) ([]JsonRpcCommand, error) { + var jsonrpcCommands []JsonRpcCommand data, err := os.ReadFile(jsonFilename) if err != nil { return jsonrpcCommands, errors.New("cannot read file " + jsonFilename + ": " + err.Error()) @@ -1486,7 +1486,7 @@ func extractJsonCommands(jsonFilename string) ([]JSONRPCCommand, error) { return jsonrpcCommands, nil } -func (c *JSONRPCCommand) compareJSONFiles(kind JsonDiffKind, errorFileName, fileName1, fileName2, diffFileName string) (bool, error) { +func (c *JsonRpcCommand) compareJSONFiles(kind JsonDiffKind, errorFileName, fileName1, fileName2, diffFileName string) (bool, error) { switch kind { case JdLibrary: jsonNode1, err := jd.ReadJsonFile(fileName1) @@ -1532,7 +1532,7 @@ func (c *JSONRPCCommand) compareJSONFiles(kind JsonDiffKind, errorFileName, file } } -func (c *JSONRPCCommand) compareJSON(config *Config, response interface{}, jsonFile, 
daemonFile, expRspFile, diffFile string, testNumber int) (bool, error) { +func (c *JsonRpcCommand) compareJSON(config *Config, response interface{}, jsonFile, daemonFile, expRspFile, diffFile string, testNumber int) (bool, error) { baseName := filepath.Join(TempDirname, fmt.Sprintf("test_%d", testNumber)) err := os.MkdirAll(baseName, 0755) if err != nil { @@ -1624,7 +1624,7 @@ func (c *JSONRPCCommand) compareJSON(config *Config, response interface{}, jsonF return true, nil } -func (c *JSONRPCCommand) processResponse(response, result1, responseInFile []byte, config *Config, outputDir, daemonFile, expRspFile, diffFile string, descriptor *TestDescriptor) (bool, error) { +func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byte, config *Config, outputDir, daemonFile, expRspFile, diffFile string, descriptor *TestDescriptor) (bool, error) { jsonFile := descriptor.Name testNumber := descriptor.Number @@ -1744,7 +1744,7 @@ func (c *JSONRPCCommand) processResponse(response, result1, responseInFile []byt return same, nil } -func (c *JSONRPCCommand) run(ctx context.Context, config *Config, descriptor *TestDescriptor) (bool, error) { +func (c *JsonRpcCommand) run(ctx context.Context, config *Config, descriptor *TestDescriptor) (bool, error) { transportType := descriptor.TransportType jsonFile := descriptor.Name request := c.Request @@ -1835,7 +1835,7 @@ func (c *JSONRPCCommand) run(ctx context.Context, config *Config, descriptor *Te func runTest(ctx context.Context, descriptor *TestDescriptor, config *Config) (bool, error) { jsonFilename := filepath.Join(config.JSONDir, descriptor.Name) - var jsonrpcCommands []JSONRPCCommand + var jsonrpcCommands []JsonRpcCommand var err error if isArchive(jsonFilename) { jsonrpcCommands, err = extractArchive(jsonFilename, config.SanitizeArchiveExt) From 96b5da2d5bfa28a9f995ec0ff89edc173b3d5528 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 19 Dec 2025 13:13:51 +0100 
Subject: [PATCH 27/87] integration: remove unused code --- cmd/integration/main.go | 131 ---------------------------------------- 1 file changed, 131 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 8171ed90..abe75612 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -29,7 +29,6 @@ import ( "syscall" "time" - bzip2w "github.com/dsnet/compress/bzip2" "github.com/golang-jwt/jwt/v5" "github.com/gorilla/websocket" "github.com/josephburnett/jd/v2" @@ -198,136 +197,6 @@ func getCompressionType(filename string) string { return NoCompression } -// --- Archiving Logic --- - -// createArchive creates a compressed or uncompressed tar archive. -func createArchive(archivePath string, files []string) error { - // Create the output file - outFile, err := os.Create(archivePath) - if err != nil { - return fmt.Errorf("failed to create output file: %w", err) - } - defer func(outFile *os.File) { - err := outFile.Close() - if err != nil { - fmt.Printf("Failed to close output file: %v\n", err) - } - }(outFile) - - // Wrap the output file with the correct compression writer (if any) - var writer io.WriteCloser = outFile - compressionType := getCompressionType(archivePath) - - switch compressionType { - case GzipCompression: - writer = gzip.NewWriter(outFile) - case Bzip2Compression: - config := &bzip2w.WriterConfig{Level: bzip2w.BestCompression} - writer, err = bzip2w.NewWriter(outFile, config) - if err != nil { - return fmt.Errorf("failed to create bzip2 writer: %w", err) - } - } - - // Create the tar writer - tarWriter := tar.NewWriter(writer) - defer func(writer io.WriteCloser, tarWriter *tar.Writer) { - // Explicitly close the compression writer if it was used (before closing the tar writer) - if compressionType != NoCompression { - if err := writer.Close(); err != nil { - fmt.Printf("failed to close compression writer: %v\n", err) - } - } - - err := tarWriter.Close() - if err != nil { - fmt.Printf("Failed to close tar writer: 
%v\n", err) - } - }(writer, tarWriter) - - // Add files to the archive - for _, file := range files { - err := addFileToTar(tarWriter, file, "") - if err != nil { - return fmt.Errorf("failed to add file %s: %w", file, err) - } - } - - return nil -} - -// addFileToTar recursively adds a file or directory to the tar archive. -func addFileToTar(tarWriter *tar.Writer, filePath, baseDir string) error { - fileInfo, err := os.Stat(filePath) - if err != nil { - return err - } - - // Determine the name inside the archive (relative path) - var link string - if fileInfo.Mode()&os.ModeSymlink != 0 { - link, err = os.Readlink(filePath) - if err != nil { - return err - } - } - - // If baseDir is not empty, use the relative path, otherwise use the basename - nameInArchive := filePath - if baseDir != "" && strings.HasPrefix(filePath, baseDir) { - nameInArchive = filePath[len(baseDir)+1:] - } else { - nameInArchive = filepath.Base(filePath) - } - - // Create the tar Header - header, err := tar.FileInfoHeader(fileInfo, link) - if err != nil { - return err - } - header.Name = nameInArchive - - if err := tarWriter.WriteHeader(header); err != nil { - return err - } - - // Write file contents if it's a regular file - if fileInfo.Mode().IsRegular() { - file, err := os.Open(filePath) - if err != nil { - return err - } - if _, err := io.Copy(tarWriter, file); err != nil { - _ = file.Close() - return err - } - _ = file.Close() - } - - // Recurse into directories - if fileInfo.IsDir() { - dirEntries, err := os.ReadDir(filePath) - if err != nil { - return err - } - for _, entry := range dirEntries { - fullPath := filepath.Join(filePath, entry.Name()) - // Keep the original baseDir if it was set, otherwise set it to the current path's parent - newBaseDir := baseDir - if baseDir == "" { - // Special handling for the root call: use the current path as the new base. - // This ensures nested files have relative paths within the archive. 
- newBaseDir = filePath - } - if err := addFileToTar(tarWriter, fullPath, newBaseDir); err != nil { - return err - } - } - } - - return nil -} - func reopenFile(filePath string, file *os.File) (*os.File, error) { err := file.Close() if err != nil && !errors.Is(err, os.ErrClosed) { From 94f16e3497a323df1b739fbb884256759f839baf Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 19 Dec 2025 19:16:31 +0100 Subject: [PATCH 28/87] integration: add metrics --- cmd/integration/main.go | 261 +++++++++++++++++++++++---------------- integration/run_tests.py | 129 ++++++++++++++----- 2 files changed, 252 insertions(+), 138 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index abe75612..f162a2e2 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -197,26 +197,14 @@ func getCompressionType(filename string) string { return NoCompression } -func reopenFile(filePath string, file *os.File) (*os.File, error) { - err := file.Close() - if err != nil && !errors.Is(err, os.ErrClosed) { - return nil, err - } - file, err = os.Open(filePath) - if err != nil { - return nil, err - } - return file, nil -} - -func autodetectCompression(archivePath string, inFile *os.File) (string, error) { +func autodetectCompression(inFile *os.File) (string, error) { // Assume we have no compression and try to detect it if the tar header is invalid compressionType := NoCompression tarReader := tar.NewReader(inFile) _, err := tarReader.Next() if err != nil && !errors.Is(err, io.EOF) { - // Reopen the file and check if it's gzip encoded - inFile, err = reopenFile(archivePath, inFile) + // Reset the file position for read and check if it's gzip encoded + _, err = inFile.Seek(0, io.SeekStart) if err != nil { return compressionType, err } @@ -224,8 +212,8 @@ func autodetectCompression(archivePath string, inFile *os.File) (string, error) if err == nil { compressionType = GzipCompression } else { - // Reopen the file and check if 
it's bzip2 encoded - inFile, err = reopenFile(archivePath, inFile) + // Reset the file position for read and check if it's gzip encoded + _, err = inFile.Seek(0, io.SeekStart) if err != nil { return compressionType, err } @@ -234,16 +222,12 @@ func autodetectCompression(archivePath string, inFile *os.File) (string, error) compressionType = Bzip2Compression } } - err = inFile.Close() - if err != nil { - return compressionType, err - } } return compressionType, nil } // extractArchive extracts a compressed or uncompressed tar archive. -func extractArchive(archivePath string, sanitizeExtension bool) ([]JsonRpcCommand, error) { +func extractArchive(archivePath string, sanitizeExtension bool, metrics *TestMetrics) ([]JsonRpcCommand, error) { // Open the archive file inputFile, err := os.Open(archivePath) if err != nil { @@ -257,7 +241,7 @@ func extractArchive(archivePath string, sanitizeExtension bool) ([]JsonRpcComman compressionType := getCompressionType(archivePath) if compressionType == NoCompression { // Possibly handle the corner case where the file is compressed but has tar extension - compressionType, err = autodetectCompression(archivePath, inputFile) + compressionType, err = autodetectCompression(inputFile) if err != nil { return nil, fmt.Errorf("failed to autodetect compression for archive: %w", err) } @@ -304,9 +288,11 @@ func extractArchive(archivePath string, sanitizeExtension bool) ([]JsonRpcComman return nil, fmt.Errorf("archive must contain a single JSON file, found %s", header.Name) } + start := time.Now() if err := json.NewDecoder(tarReader).Decode(&jsonrpcCommands); err != nil { return jsonrpcCommands, errors.New("cannot parse JSON " + archivePath + ": " + err.Error()) } + metrics.UnmarshallingTime += time.Since(start) return jsonrpcCommands, nil } @@ -375,9 +361,21 @@ type Config struct { TraceFile string } -type TestResult struct { +type TestMetrics struct { + RoundTripTime time.Duration + MarshallingTime time.Duration + UnmarshallingTime 
time.Duration + ComparisonCount int +} + +type TestOutcome struct { Success bool Error error + Metrics TestMetrics +} + +type TestResult struct { + Outcome TestOutcome Test *TestDescriptor } @@ -693,11 +691,13 @@ func usage() { } func getTarget(targetType, method string, config *Config) string { + isEngine := strings.HasPrefix(method, "engine_") + if targetType == ExternalProvider { return config.ExternalProviderURL } - if config.VerifyWithDaemon && targetType == DaemonOnOtherPort && strings.Contains(method, "engine_") { + if config.VerifyWithDaemon && targetType == DaemonOnOtherPort && isEngine { return config.DaemonOnHost + ":51516" } @@ -705,7 +705,7 @@ func getTarget(targetType, method string, config *Config) string { return config.DaemonOnHost + ":51515" } - if targetType == DaemonOnOtherPort && strings.Contains(method, "engine_") { + if targetType == DaemonOnOtherPort && isEngine { return config.DaemonOnHost + ":51516" } @@ -713,7 +713,7 @@ func getTarget(targetType, method string, config *Config) string { return config.DaemonOnHost + ":51515" } - if strings.Contains(method, "engine_") { + if isEngine { port := config.EnginePort if port == 0 { port = 8551 @@ -1074,7 +1074,7 @@ func validateJsonRpcResponse(response any) error { return nil } -func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtAuth, target string, request []byte) ([]byte, error) { +func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtAuth, target string, request []byte, metrics *TestMetrics) ([]byte, error) { headers := map[string]string{ "Content-Type": "application/json", } @@ -1113,6 +1113,7 @@ func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtA start := time.Now() resp, err := client.Do(req) elapsed := time.Since(start) + metrics.RoundTripTime = elapsed if config.VerboseLevel > 1 { fmt.Printf("http round-trip time: %v\n", elapsed) } @@ -1155,7 +1156,7 @@ func executeHttpRequest(ctx context.Context, config 
*Config, transportType, jwtA return body, nil } -func executeWebSocketRequest(config *Config, transportType, jwtAuth, target string, request []byte) ([]byte, error) { +func executeWebSocketRequest(config *Config, transportType, jwtAuth, target string, request []byte, metrics *TestMetrics) ([]byte, error) { wsTarget := "ws://" + target dialer := websocket.Dialer{ HandshakeTimeout: 300 * time.Second, @@ -1181,6 +1182,7 @@ func executeWebSocketRequest(config *Config, transportType, jwtAuth, target stri } }(conn) + start := time.Now() if err = conn.WriteMessage(websocket.BinaryMessage, request); err != nil { if config.VerboseLevel > 0 { fmt.Printf("\nwebsocket write fail: %v\n", err) @@ -1195,6 +1197,7 @@ func executeWebSocketRequest(config *Config, transportType, jwtAuth, target stri } return nil, err } + metrics.RoundTripTime = time.Since(start) if config.VerboseLevel > 1 { fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, request, string(message)) @@ -1203,11 +1206,11 @@ func executeWebSocketRequest(config *Config, transportType, jwtAuth, target stri return message, nil } -func executeRequest(ctx context.Context, config *Config, transportType, jwtAuth, target string, request []byte) ([]byte, error) { +func executeRequest(ctx context.Context, config *Config, transportType, jwtAuth, target string, request []byte, metrics *TestMetrics) ([]byte, error) { if strings.HasPrefix(transportType, "http") { - return executeHttpRequest(ctx, config, transportType, jwtAuth, target, request) + return executeHttpRequest(ctx, config, transportType, jwtAuth, target, request, metrics) } - return executeWebSocketRequest(config, transportType, jwtAuth, target, request) + return executeWebSocketRequest(config, transportType, jwtAuth, target, request, metrics) } func runCompare(jsonDiff bool, errorFile, tempFile1, tempFile2, diffFile string) bool { @@ -1343,15 +1346,17 @@ func isArchive(jsonFilename string) bool { return !strings.HasSuffix(jsonFilename, ".json") } -func 
extractJsonCommands(jsonFilename string) ([]JsonRpcCommand, error) { +func extractJsonCommands(jsonFilename string, metrics *TestMetrics) ([]JsonRpcCommand, error) { var jsonrpcCommands []JsonRpcCommand data, err := os.ReadFile(jsonFilename) if err != nil { return jsonrpcCommands, errors.New("cannot read file " + jsonFilename + ": " + err.Error()) } + start := time.Now() if err := json.Unmarshal(data, &jsonrpcCommands); err != nil { return jsonrpcCommands, errors.New("cannot parse JSON " + jsonFilename + ": " + err.Error()) } + metrics.UnmarshallingTime += time.Since(start) return jsonrpcCommands, nil } @@ -1401,7 +1406,7 @@ func (c *JsonRpcCommand) compareJSONFiles(kind JsonDiffKind, errorFileName, file } } -func (c *JsonRpcCommand) compareJSON(config *Config, response interface{}, jsonFile, daemonFile, expRspFile, diffFile string, testNumber int) (bool, error) { +func (c *JsonRpcCommand) compareJSON(config *Config, response interface{}, jsonFile, daemonFile, expRspFile, diffFile string, testNumber int, metrics *TestMetrics) (bool, error) { baseName := filepath.Join(TempDirname, fmt.Sprintf("test_%d", testNumber)) err := os.MkdirAll(baseName, 0755) if err != nil { @@ -1459,6 +1464,8 @@ func (c *JsonRpcCommand) compareJSON(config *Config, response interface{}, jsonF diffResult, err := c.compareJSONFiles(config.DiffKind, errorFile, tempFile1, tempFile2, diffFile) diffFileSize := int64(0) + metrics.ComparisonCount += 1 + if diffResult { fileInfo, err := os.Stat(diffFile) if err != nil { @@ -1493,7 +1500,7 @@ func (c *JsonRpcCommand) compareJSON(config *Config, response interface{}, jsonF return true, nil } -func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byte, config *Config, outputDir, daemonFile, expRspFile, diffFile string, descriptor *TestDescriptor) (bool, error) { +func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byte, config *Config, outputDir, daemonFile, expRspFile, diffFile string, descriptor 
*TestDescriptor, outcome *TestOutcome) { jsonFile := descriptor.Name testNumber := descriptor.Number @@ -1507,35 +1514,49 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt if config.WithoutCompareResults { err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { - return false, err + outcome.Error = err + return } - return true, nil + outcome.Success = true + return } var responseMap map[string]interface{} var respIsMap bool + start := time.Now() if err := json.Unmarshal(response, &responseMap); err == nil { + outcome.Metrics.UnmarshallingTime += time.Since(start) respIsMap = true + start = time.Now() response, err = json.Marshal(responseMap) if err != nil { - return false, err + outcome.Error = err + return } + outcome.Metrics.MarshallingTime += time.Since(start) err = validateJsonRpcResponse(responseMap) if err != nil { - return false, err + outcome.Error = err + return } } var expectedMap map[string]interface{} var expIsMap bool + start = time.Now() if err := json.Unmarshal(expectedResponse, &expectedMap); err == nil { + outcome.Metrics.UnmarshallingTime += time.Since(start) expIsMap = true + start := time.Now() expectedResponse, err = json.Marshal(expectedMap) if err != nil { - return false, err + outcome.Error = err + return } + outcome.Metrics.MarshallingTime += time.Since(start) err = validateJsonRpcResponse(expectedMap) if err != nil { - return false, err + outcome.Error = err + return } } @@ -1543,9 +1564,11 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt if bytes.Equal(response, expectedResponse) { err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { - return false, err + outcome.Error = err + return } - return true, nil + outcome.Success = true + return } // Check various conditions where we don't care about differences @@ -1557,84 +1580,81 @@ func (c 
*JsonRpcCommand) processResponse(response, result1, responseInFile []byt if responseHasResult && expectedHasResult && expectedResult == nil && result1 == nil { err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { - return false, err + outcome.Error = err + return } - return true, nil + outcome.Success = true + return } if responseHasError && expectedHasError && expectedError == nil { err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { - return false, err + outcome.Error = err + return } - return true, nil + outcome.Success = true + return } // TODO: improve len(expectedMap) == 2 which means: just "jsonrpc" and "id" are expected if !expectedHasResult && !expectedHasError && len(expectedMap) == 2 { err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { - return false, err + outcome.Error = err + return } - return true, nil + outcome.Success = true + return } if responseHasError && expectedHasError && config.DoNotCompareError { err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { - return false, err + outcome.Error = err + return } - return true, nil + outcome.Success = true + return } } // We need to compare the response and expectedResponse, so we dump them to files first err := dumpJSONs(true, daemonFile, expRspFile, outputDir, response, expectedResponse) if err != nil { - return false, err + outcome.Error = err + return } - same, err := c.compareJSON(config, responseMap, jsonFile, daemonFile, expRspFile, diffFile, testNumber) + same, err := c.compareJSON(config, responseMap, jsonFile, daemonFile, expRspFile, diffFile, testNumber, &outcome.Metrics) if err != nil { - return same, err + outcome.Error = err + return } if same && !config.ForceDumpJSONs { err := os.Remove(daemonFile) if err != nil { - return 
false, err + outcome.Error = err + return } err = os.Remove(expRspFile) if err != nil { - return false, err + outcome.Error = err + return } err = os.Remove(diffFile) if err != nil { - return false, err + outcome.Error = err + return } } - return same, nil + outcome.Success = same } -func (c *JsonRpcCommand) run(ctx context.Context, config *Config, descriptor *TestDescriptor) (bool, error) { +func (c *JsonRpcCommand) run(ctx context.Context, config *Config, descriptor *TestDescriptor, outcome *TestOutcome) { transportType := descriptor.TransportType jsonFile := descriptor.Name request := c.Request - method := "" - var requestMap map[string]interface{} - if err := json.Unmarshal(request, &requestMap); err == nil { - if m, ok := requestMap["method"].(string); ok { - method = m - } - } else { - // Try an array of requests - var requestArray []map[string]interface{} - if err := json.Unmarshal(request, &requestArray); err == nil && len(requestArray) > 0 { - if m, ok := requestArray[0]["method"].(string); ok { - method = m - } - } - } - - target := getTarget(config.DaemonUnderTest, method, config) + target := getTarget(config.DaemonUnderTest, descriptor.Name, config) target1 := "" var jwtAuth string @@ -1652,79 +1672,89 @@ func (c *JsonRpcCommand) run(ctx context.Context, config *Config, descriptor *Te diffFile := outputAPIFilename + "-diff.json" if !config.VerifyWithDaemon { - result, err := executeRequest(ctx, config, transportType, jwtAuth, target, request) + result, err := executeRequest(ctx, config, transportType, jwtAuth, target, request, &outcome.Metrics) if err != nil { - return false, err + outcome.Error = err + return } if config.VerboseLevel > 2 { fmt.Printf("%s: [%v]\n", config.DaemonUnderTest, result) } if result == nil { - return false, errors.New("response is n il (maybe node at " + target + " is down?)") + outcome.Error = errors.New("response is n il (maybe node at " + target + " is down?)") + return } responseInFile := c.Response daemonFile := 
outputAPIFilename + "-response.json" expRspFile := outputAPIFilename + "-expResponse.json" - return c.processResponse(result, nil, responseInFile, config, - outputDirName, daemonFile, expRspFile, diffFile, descriptor) + c.processResponse(result, nil, responseInFile, config, outputDirName, daemonFile, expRspFile, diffFile, descriptor, outcome) } else { - target = getTarget(DaemonOnDefaultPort, method, config) - result, err := executeRequest(ctx, config, transportType, jwtAuth, target, request) + target = getTarget(DaemonOnDefaultPort, descriptor.Name, config) + result, err := executeRequest(ctx, config, transportType, jwtAuth, target, request, &outcome.Metrics) if err != nil { - return false, err + outcome.Error = err + return } if config.VerboseLevel > 2 { fmt.Printf("%s: [%v]\n", config.DaemonUnderTest, result) } if result == nil { - return false, errors.New("response is nil (maybe node at " + target + " is down?)") + outcome.Error = errors.New("response is nil (maybe node at " + target + " is down?)") + return } - target1 = getTarget(config.DaemonAsReference, method, config) - result1, err := executeRequest(ctx, config, transportType, jwtAuth, target1, request) + target1 = getTarget(config.DaemonAsReference, descriptor.Name, config) + result1, err := executeRequest(ctx, config, transportType, jwtAuth, target1, request, &outcome.Metrics) if err != nil { - return false, err + outcome.Error = err + return } if config.VerboseLevel > 2 { fmt.Printf("%s: [%v]\n", config.DaemonAsReference, result1) } if result1 == nil { - return false, errors.New("response is nil (maybe node at " + target1 + " is down?)") + outcome.Error = errors.New("response is nil (maybe node at " + target1 + " is down?)") + return } daemonFile := outputAPIFilename + getJSONFilenameExt(DaemonOnDefaultPort, target) expRspFile := outputAPIFilename + getJSONFilenameExt(config.DaemonAsReference, target1) - return c.processResponse(result, result1, nil, config, - outputDirName, daemonFile, expRspFile, 
diffFile, descriptor) + c.processResponse(result, result1, nil, config, outputDirName, daemonFile, expRspFile, diffFile, descriptor, outcome) + return } } -func runTest(ctx context.Context, descriptor *TestDescriptor, config *Config) (bool, error) { +func runTest(ctx context.Context, descriptor *TestDescriptor, config *Config) TestOutcome { jsonFilename := filepath.Join(config.JSONDir, descriptor.Name) + outcome := TestOutcome{} + var jsonrpcCommands []JsonRpcCommand var err error if isArchive(jsonFilename) { - jsonrpcCommands, err = extractArchive(jsonFilename, config.SanitizeArchiveExt) + jsonrpcCommands, err = extractArchive(jsonFilename, config.SanitizeArchiveExt, &outcome.Metrics) if err != nil { - return false, errors.New("cannot extract archive file " + jsonFilename) + outcome.Error = errors.New("cannot extract archive file " + jsonFilename) + return outcome } } else { - jsonrpcCommands, err = extractJsonCommands(jsonFilename) + jsonrpcCommands, err = extractJsonCommands(jsonFilename, &outcome.Metrics) if err != nil { - return false, err + outcome.Error = err + return outcome } } - for _, jsonrpcCmd := range jsonrpcCommands { - return jsonrpcCmd.run(ctx, config, descriptor) // TODO: support multiple tests + if len(jsonrpcCommands) != 1 { + outcome.Error = errors.New("expected exactly one JSON RPC command in " + jsonFilename) + return outcome } - fmt.Printf("WARN: no commands found in test %s\n", jsonFilename) + jsonrpcCommands[0].run(ctx, config, descriptor, &outcome) - return true, nil + return outcome } func mustAtoi(s string) int { @@ -1741,6 +1771,11 @@ type ResultCollector struct { successTests int failedTests int executedTests int + + totalRoundTripTime time.Duration + totalMarshallingTime time.Duration + totalUnmarshallingTime time.Duration + totalComparisonCount int } func newResultCollector(resultsChan chan chan TestResult, config *Config) *ResultCollector { @@ -1762,16 +1797,20 @@ func (c *ResultCollector) start(ctx context.Context, cancelCtx 
context.CancelFun tt := fmt.Sprintf("%-15s", result.Test.TransportType) fmt.Printf("%04d. %s::%s ", result.Test.Number, tt, file) - if result.Success { + if result.Outcome.Success { c.successTests++ if c.config.VerboseLevel > 0 { fmt.Println("OK") } else { fmt.Print("OK\r") } + c.totalRoundTripTime += result.Outcome.Metrics.RoundTripTime + c.totalMarshallingTime += result.Outcome.Metrics.MarshallingTime + c.totalUnmarshallingTime += result.Outcome.Metrics.UnmarshallingTime + c.totalComparisonCount += result.Outcome.Metrics.ComparisonCount } else { c.failedTests++ - fmt.Printf("failed: %s\n", result.Error.Error()) + fmt.Printf("failed: %s\n", result.Outcome.Error.Error()) if c.config.ExitOnFail { // Signal other tasks to stop and exit cancelCtx() @@ -1908,8 +1947,8 @@ func runMain() int { // Worker pool for parallel execution var wg sync.WaitGroup - testsChan := make(chan *TestDescriptor, 10000) - resultsChan := make(chan chan TestResult, 10000) + testsChan := make(chan *TestDescriptor, 2000) + resultsChan := make(chan chan TestResult, 2000) numWorkers := 1 if config.Parallel { @@ -1929,8 +1968,8 @@ func runMain() int { if test == nil { return } - success, err := runTest(ctx, test, config) - test.ResultChan <- TestResult{Success: success, Error: err, Test: test} + testOutcome := runTest(ctx, test, config) + test.ResultChan <- TestResult{Outcome: testOutcome, Test: test} case <-ctx.Done(): return } @@ -2067,6 +2106,12 @@ func runMain() int { shouldRun := false if config.TestingAPIsWith == "" && config.TestingAPIs == "" && (config.ReqTestNumber == -1 || config.ReqTestNumber == testNumberInAnyLoop) { shouldRun = true + /*if slices.Contains([]int{29, 37, 133, 173, 1008, 1272, 1274}, testNumberInAnyLoop) { + file := fmt.Sprintf("%-60s", jsonTestFullName) + tt := fmt.Sprintf("%-15s", transportType) + fmt.Printf("%04d. 
%s::%s skipped as long-running\n", testNumberInAnyLoop, tt, file) + shouldRun = false + }*/ } else if config.TestingAPIsWith != "" && checkTestNameForNumber(testName, config.ReqTestNumber) { shouldRun = true } else if config.TestingAPIs != "" && checkTestNameForNumber(testName, config.ReqTestNumber) { @@ -2146,6 +2191,10 @@ func runMain() int { // Print results elapsed := time.Since(startTime) fmt.Println("\n ") + fmt.Printf("Total HTTP round-trip time: %v\n", resultsCollector.totalRoundTripTime) + fmt.Printf("Total Marshalling time: %v\n", resultsCollector.totalMarshallingTime) + fmt.Printf("Total Unmarshalling time: %v\n", resultsCollector.totalUnmarshallingTime) + fmt.Printf("Total Comparison count: %v\n", resultsCollector.totalComparisonCount) fmt.Printf("Test session duration: %v\n", elapsed) fmt.Printf("Test session iterations: %d\n", testRep) fmt.Printf("Test suite total APIs: %d\n", availableTestedAPIs) diff --git a/integration/run_tests.py b/integration/run_tests.py index 489c04b2..c9b9a237 100755 --- a/integration/run_tests.py +++ b/integration/run_tests.py @@ -2,7 +2,7 @@ """ Run the JSON RPC API curl commands as integration tests """ from typing import Optional -from datetime import datetime +from datetime import datetime, timedelta import getopt import gzip import json @@ -692,7 +692,21 @@ def get_json_from_response(target, msg, verbose_level: int, result): return None, error_msg -def dump_jsons(dump_json, daemon_file, exp_rsp_file, output_dir, response, expected_response: str): +class TestMetrics: + def __init__(self): + self.round_trip_time = timedelta(0) + self.marshalling_time = timedelta(0) + self.unmarshalling_time = timedelta(0) + + +class TestOutcome: + def __init__(self, return_code: int = 0, error_msg: str = ''): + self.return_code = return_code + self.error_msg = error_msg + self.metrics = TestMetrics() + + +def dump_jsons(dump_json, daemon_file, exp_rsp_file, output_dir, response, expected_response: str, outcome: TestOutcome): """ dump 
jsons on result dir """ if not dump_json: return @@ -708,19 +722,23 @@ def dump_jsons(dump_json, daemon_file, exp_rsp_file, output_dir, response, expec if os.path.exists(daemon_file): os.remove(daemon_file) with open(daemon_file, 'w', encoding='utf8') as json_file_ptr: + start_time = datetime.now() json_file_ptr.write(json.dumps(response, indent=2, sort_keys=True)) + outcome.metrics.marshalling_time += (datetime.now() - start_time) if exp_rsp_file != "": if os.path.exists(exp_rsp_file): os.remove(exp_rsp_file) with open(exp_rsp_file, 'w', encoding='utf8') as json_file_ptr: + start_time = datetime.now() json_file_ptr.write(json.dumps(expected_response, indent=2, sort_keys=True)) + outcome.metrics.marshalling_time += (datetime.now() - start_time) break except OSError as e: print("Exception on file write: .. ", {e}, attempt) -def execute_request(transport_type: str, jwt_auth, request_dumps, target: str, verbose_level: int): +def execute_request(transport_type: str, jwt_auth, request_dumps, target: str, verbose_level: int, metrics: TestMetrics): """ execute request on server identified by target """ if transport_type in ("http", 'http_comp', 'https'): http_headers = {'content-type': 'application/json'} @@ -732,14 +750,18 @@ def execute_request(transport_type: str, jwt_auth, request_dumps, target: str, v target_url = ("https://" if transport_type == "https" else "http://") + target try: + start_time = datetime.now() rsp = requests.post(target_url, data=request_dumps, headers=http_headers, timeout=300) + metrics.round_trip_time += (datetime.now() - start_time) if rsp.status_code != 200: if verbose_level > 1: print("\npost result status_code: ", rsp.status_code) return "" if verbose_level > 1: print("\npost result content: ", rsp.content) + start_time = datetime.now() result = rsp.json() + metrics.unmarshalling_time += (datetime.now() - start_time) except OSError as e: if verbose_level: print("\nhttp connection fail: ", target_url, e) @@ -763,9 +785,13 @@ def 
execute_request(transport_type: str, jwt_auth, request_dumps, target: str, v http_headers['Authorization'] = jwt_auth with connect(ws_target, max_size=1000048576, compression=selected_compression, extensions=curr_extensions, open_timeout=None) as websocket: + start_time = datetime.now() websocket.send(request_dumps) rsp = websocket.recv(None) + metrics.round_trip_time += (datetime.now() - start_time) + start_time = datetime.now() result = json.loads(rsp) + metrics.unmarshalling_time += (datetime.now() - start_time) except OSError as e: if verbose_level: @@ -883,47 +909,60 @@ def compare_json(config, response, json_file, daemon_file, exp_rsp_file, diff_fi def process_response(target, target1, result, result1: str, response_in_file, config, - output_dir: str, daemon_file: str, exp_rsp_file: str, diff_file: str, json_file: str, test_number: int): + output_dir: str, daemon_file: str, exp_rsp_file: str, diff_file: str, json_file: str, test_number: int, outcome: TestOutcome): """ Process the response If exact result or error don't care, they are null but present in expected_response. 
""" response, error_msg = get_json_from_response(target, config.daemon_under_test, config.verbose_level, result) if response is None: - return 0, error_msg + outcome.return_code = 0 + outcome.error_msg = error_msg + return if result1 != "": expected_response, error_msg = get_json_from_response(target1, config.daemon_as_reference, config.verbose_level, result1) if expected_response is None: - return 0, error_msg + outcome.return_code = 0 + outcome.error_msg = error_msg + return else: expected_response = response_in_file if config.without_compare_results is True: - dump_jsons(config.force_dump_jsons, daemon_file, exp_rsp_file, output_dir, response, expected_response) - return 1, "" + dump_jsons(config.force_dump_jsons, daemon_file, exp_rsp_file, output_dir, response, expected_response, outcome) + outcome.return_code = 1 + return if response is None: - return 0, "Failed [" + config.daemon_under_test + "] (server doesn't response)" + outcome.return_code = 0 + outcome.error_msg = "Failed [" + config.daemon_under_test + "] (server doesn't response)" + return if expected_response is None: - return 0, "Failed [" + config.daemon_as_reference + "] (server doesn't response)" + outcome.return_code = 0 + outcome.error_msg = "Failed [" + config.daemon_as_reference + "] (server doesn't response)" + return if response != expected_response: if "result" in response and "result" in expected_response and expected_response["result"] is None and result1 == "": # response and expected_response are different but don't care - dump_jsons(config.force_dump_jsons, daemon_file, exp_rsp_file, output_dir, response, expected_response) - return 1, "" + dump_jsons(config.force_dump_jsons, daemon_file, exp_rsp_file, output_dir, response, expected_response, outcome) + outcome.return_code = 1 + return if "error" in response and "error" in expected_response and expected_response["error"] is None: # response and expected_response are different but don't care - dump_jsons(config.force_dump_jsons, 
daemon_file, exp_rsp_file, output_dir, response, expected_response) - return 1, "" + dump_jsons(config.force_dump_jsons, daemon_file, exp_rsp_file, output_dir, response, expected_response, outcome) + outcome.return_code = 1 + return if "error" not in expected_response and "result" not in expected_response and not isinstance(expected_response, list) and len(expected_response) == 2: # response and expected_response are different but don't care - dump_jsons(config.force_dump_jsons, daemon_file, exp_rsp_file, output_dir, response, expected_response) - return 1, "" + dump_jsons(config.force_dump_jsons, daemon_file, exp_rsp_file, output_dir, response, expected_response, outcome) + outcome.return_code = 1 + return if "error" in response and "error" in expected_response and config.do_not_compare_error: - dump_jsons(config.force_dump_jsons, daemon_file, exp_rsp_file, output_dir, response, expected_response) - return 1, "" - dump_jsons(True, daemon_file, exp_rsp_file, output_dir, response, expected_response) + dump_jsons(config.force_dump_jsons, daemon_file, exp_rsp_file, output_dir, response, expected_response, outcome) + outcome.return_code = 1 + return + dump_jsons(True, daemon_file, exp_rsp_file, output_dir, response, expected_response, outcome) same, error_msg = compare_json(config, response, json_file, daemon_file, exp_rsp_file, diff_file, test_number) # cleanup @@ -937,11 +976,14 @@ def process_response(target, target1, result, result1: str, response_in_file, co except OSError: pass - dump_jsons(config.force_dump_jsons, daemon_file, exp_rsp_file, output_dir, response, expected_response) - return same, error_msg + dump_jsons(config.force_dump_jsons, daemon_file, exp_rsp_file, output_dir, response, expected_response, outcome) + outcome.return_code = same + outcome.error_msg = error_msg + return - dump_jsons(config.force_dump_jsons, daemon_file, exp_rsp_file, output_dir, response, expected_response) - return 1, "" + dump_jsons(config.force_dump_jsons, daemon_file, 
exp_rsp_file, output_dir, response, expected_response, outcome) + outcome.return_code = 1 + return def run_test(json_file: str, test_number, transport_type, config): @@ -949,22 +991,30 @@ def run_test(json_file: str, test_number, transport_type, config): json_filename = config.json_dir + json_file ext = os.path.splitext(json_file)[1] + outcome = TestOutcome() if ext in (".zip", ".tar"): with tarfile.open(json_filename, encoding='utf-8') as tar: files = tar.getmembers() if len(files) != 1: - return 0, "bad archive file " + json_filename + outcome.error_msg = "bad archive file " + json_filename + return outcome file = tar.extractfile(files[0]) buff = file.read() tar.close() + start_time = datetime.now() jsonrpc_commands = json.loads(buff) + outcome.metrics.unmarshalling_time += (datetime.now() - start_time) elif ext in ".gzip": with gzip.open(json_filename, 'rb') as zipped_file: buff = zipped_file.read() + start_time = datetime.now() jsonrpc_commands = json.loads(buff) + outcome.metrics.unmarshalling_time += (datetime.now() - start_time) else: with open(json_filename, encoding='utf8') as json_file_ptr: + start_time = datetime.now() jsonrpc_commands = json.load(json_file_ptr) + outcome.metrics.unmarshalling_time += (datetime.now() - start_time) for json_rpc in jsonrpc_commands: request = json_rpc["request"] try: @@ -974,7 +1024,9 @@ def run_test(json_file: str, test_number, transport_type, config): method = request[0]["method"] except KeyError: method = "" + start_time = datetime.now() request_dumps = json.dumps(request) + outcome.metrics.marshalling_time += (datetime.now() - start_time) target = get_target(config.daemon_under_test, method, config) target1 = "" if config.jwt_secret == "": @@ -984,7 +1036,7 @@ def run_test(json_file: str, test_number, transport_type, config): encoded = jwt.encode({"iat": datetime.now(pytz.utc)}, byte_array_secret, algorithm="HS256") jwt_auth = "Bearer " + str(encoded) if config.verify_with_daemon is False: # compare daemon result with 
file - result = execute_request(transport_type, jwt_auth, request_dumps, target, config.verbose_level) + result = execute_request(transport_type, jwt_auth, request_dumps, target, config.verbose_level, outcome.metrics) result1 = "" response_in_file = json_rpc["response"] @@ -997,9 +1049,9 @@ def run_test(json_file: str, test_number, transport_type, config): else: # run tests with two servers target = get_target(DAEMON_ON_DEFAULT_PORT, method, config) - result = execute_request(transport_type, jwt_auth, request_dumps, target, config.verbose_level) + result = execute_request(transport_type, jwt_auth, request_dumps, target, config.verbose_level, outcome.metrics) target1 = get_target(config.daemon_as_reference, method, config) - result1 = execute_request(transport_type, jwt_auth, request_dumps, target1, config.verbose_level) + result1 = execute_request(transport_type, jwt_auth, request_dumps, target1, config.verbose_level, outcome.metrics) response_in_file = None output_api_filename = config.output_dir + os.path.splitext(json_file)[0] @@ -1009,7 +1061,7 @@ def run_test(json_file: str, test_number, transport_type, config): daemon_file = output_api_filename + get_json_filename_ext(DAEMON_ON_DEFAULT_PORT, target) exp_rsp_file = output_api_filename + get_json_filename_ext(config.daemon_as_reference, target1) - return process_response( + process_response( target, target1, result, @@ -1021,7 +1073,9 @@ def run_test(json_file: str, test_number, transport_type, config): exp_rsp_file, diff_file, json_file, - test_number) + test_number, + outcome) + return outcome def extract_number(filename): @@ -1100,6 +1154,10 @@ def main(argv) -> int: global_test_number = 0 available_tested_apis = 0 + total_round_trip_time = timedelta(0) + total_marshalling_time = timedelta(0) + total_unmarshalling_time = timedelta(0) + test_rep = 0 test_results = [] # Store test results for JSON report try: for test_rep in range(0, config.loop_number): # makes tests more times @@ -1194,7 +1252,11 @@ def 
main(argv) -> int: curr_future.cancel() continue print(f"{curr_test_number_in_any_loop:04d}. {curr_tt}::{file} ", end='', flush=True) - result, error_msg = curr_future.result() + test_outcome = curr_future.result() + result, error_msg = test_outcome.return_code, test_outcome.error_msg + total_round_trip_time += test_outcome.metrics.round_trip_time + total_marshalling_time += test_outcome.metrics.marshalling_time + total_unmarshalling_time += test_outcome.metrics.unmarshalling_time if result == 1: success_tests = success_tests + 1 if config.verbose_level: @@ -1239,10 +1301,13 @@ def main(argv) -> int: # print results at the end of all the tests elapsed = datetime.now() - start_time print(" \r") + print(f"Total round_trip time: {str(total_round_trip_time)}") + print(f"Total marshalling time: {str(total_marshalling_time)}") + print(f"Total unmarshalling time: {str(total_unmarshalling_time)}") print(f"Test time-elapsed: {str(elapsed)}") - print(f"Available tests: {global_test_number}") + print(f"Available tests: {global_test_number - 1}") print(f"Available tested api: {available_tested_apis}") - print(f"Number of loop: {config.loop_number}") + print(f"Number of loop: {test_rep + 1}") print(f"Number of executed tests: {executed_tests}") print(f"Number of NOT executed tests: {tests_not_executed}") print(f"Number of success tests: {success_tests}") From a28674ba7c5111b4cb2d34b832f0317fe8fca0b4 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sat, 20 Dec 2025 09:18:40 +0100 Subject: [PATCH 29/87] avoid copy and replace --- cmd/integration/main.go | 71 +---------------------------------------- 1 file changed, 1 insertion(+), 70 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index f162a2e2..3d309d5d 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1407,65 +1407,10 @@ func (c *JsonRpcCommand) compareJSONFiles(kind JsonDiffKind, errorFileName, file } func (c *JsonRpcCommand) 
compareJSON(config *Config, response interface{}, jsonFile, daemonFile, expRspFile, diffFile string, testNumber int, metrics *TestMetrics) (bool, error) { - baseName := filepath.Join(TempDirname, fmt.Sprintf("test_%d", testNumber)) - err := os.MkdirAll(baseName, 0755) - if err != nil { - return false, err - } - - tempFile1 := filepath.Join(baseName, "daemon_lower_case.txt") - tempFile2 := filepath.Join(baseName, "rpc_lower_case.txt") - errorFile := filepath.Join(baseName, "ERROR.txt") - - // Check if response contains error - responseMap, isMap := response.(map[string]interface{}) - hasError := isMap && responseMap["error"] != nil - - if hasError { - err := toLowerCase(daemonFile, tempFile1) - if err != nil { - return false, err - } - err = toLowerCase(expRspFile, tempFile2) - if err != nil { - return false, err - } - } else { - _, err := copyFile(daemonFile, tempFile1) - if err != nil { - return false, err - } - _, err = copyFile(expRspFile, tempFile2) - if err != nil { - return false, err - } - } - - if isNotComparedMessage(jsonFile, config.Net) { - err := replaceMessage(expRspFile, tempFile1, "message") - if err != nil { - return false, err - } - err = replaceMessage(daemonFile, tempFile2, "message") - if err != nil { - return false, err - } - } else if isNotComparedError(jsonFile, config.Net) { - err := replaceMessage(expRspFile, tempFile1, "error") - if err != nil { - return false, err - } - err = replaceMessage(daemonFile, tempFile2, "error") - if err != nil { - return false, err - } - } - - diffResult, err := c.compareJSONFiles(config.DiffKind, errorFile, tempFile1, tempFile2, diffFile) diffFileSize := int64(0) - metrics.ComparisonCount += 1 + diffResult, err := c.compareJSONFiles(config.DiffKind, "/dev/null", expRspFile, daemonFile, diffFile) if diffResult { fileInfo, err := os.Stat(diffFile) if err != nil { @@ -1474,20 +1419,6 @@ func (c *JsonRpcCommand) compareJSON(config *Config, response interface{}, jsonF diffFileSize = fileInfo.Size() } - // Cleanup 
temp files - err = os.Remove(tempFile1) - if err != nil { - return false, err - } - err = os.Remove(tempFile2) - if err != nil { - return false, err - } - err = os.RemoveAll(baseName) - if err != nil { - return false, err - } - if diffFileSize != 0 || !diffResult { if !diffResult { err = errDiffTimeout From 1707d05d485d3aaacbef90c451ad7cfee2df8821 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sat, 20 Dec 2025 09:26:07 +0100 Subject: [PATCH 30/87] update test --- .../mainnet/eth_callBundle/test_06.json | 47 ++++++++++-------- .../mainnet/eth_callBundle/test_07.json | 48 ++++++++++--------- .../mainnet/eth_callBundle/test_08.json | 48 ++++++++++--------- .../mainnet/eth_callBundle/test_11.json | 47 ++++++++++-------- .../mainnet/eth_callBundle/test_13.json | 48 ++++++++++--------- 5 files changed, 130 insertions(+), 108 deletions(-) diff --git a/integration/mainnet/eth_callBundle/test_06.json b/integration/mainnet/eth_callBundle/test_06.json index 4bdde06b..73e7960e 100644 --- a/integration/mainnet/eth_callBundle/test_06.json +++ b/integration/mainnet/eth_callBundle/test_06.json @@ -1,23 +1,28 @@ [ - { - "test": { - "reference": "https://etherscan.io/tx/0xf395485549082ee9937b4c4ab9ff53e0da99a97f83342b88b1f6935c9b13b7a0 on block https://etherscan.io/block/17475713", - "description": "tx post EIP-1559 on block post EIP-1559" - }, - "request": { - "jsonrpc": "2.0", - "method": "eth_callBundle", - "params": [["0xf395485549082ee9937b4c4ab9ff53e0da99a97f83342b88b1f6935c9b13b7a0"], "0x10AA881", 50000], - "id": 1 - }, - "response": { - "error": { - "code": -32000, - "message": "insufficient funds for gas * price + value: address 0xdafde5e90affadfded748a3aee0853aa3ce85d81 have 25830686154113624 want 1553802184693833909" - }, - "id": 1, - "jsonrpc": "2.0" - } + { + "test": { + "reference": "https://etherscan.io/tx/0xf395485549082ee9937b4c4ab9ff53e0da99a97f83342b88b1f6935c9b13b7a0 on block https://etherscan.io/block/17475713", + 
"description": "tx post EIP-1559 on block post EIP-1559" + }, + "request": { + "jsonrpc": "2.0", + "method": "eth_callBundle", + "params": [ + [ + "0xf395485549082ee9937b4c4ab9ff53e0da99a97f83342b88b1f6935c9b13b7a0" + ], + "0x10AA881", + 50000 + ], + "id": 1 + }, + "response": { + "error": { + "code": -32000, + "message": "insufficient funds for gas * price + value: address 0xdAfdE5E90AffAdFdED748a3aeE0853aa3CE85D81 have 25830686154113624 want 1553802184693833909" + }, + "id": 1, + "jsonrpc": "2.0" + } } -] - +] \ No newline at end of file diff --git a/integration/mainnet/eth_callBundle/test_07.json b/integration/mainnet/eth_callBundle/test_07.json index b0c4a2b7..8a285a98 100644 --- a/integration/mainnet/eth_callBundle/test_07.json +++ b/integration/mainnet/eth_callBundle/test_07.json @@ -1,24 +1,28 @@ [ - { - "test": { - "reference": "https://etherscan.io/tx/0x67b7da808974bffe636e6c969cbdcb600f1196cdf06e2faba981414fc2511da2 on block https://etherscan.io/block/17999744", - "description": "tx post EIP-1559 on block post EIP-1559" - }, - - "request": { - "jsonrpc": "2.0", - "method": "eth_callBundle", - "params": [["0x67b7da808974bffe636e6c969cbdcb600f1196cdf06e2faba981414fc2511da2"], "0x112A780", 50000], - "id": 1 - }, - "response": { - "error": { - "code": -32000, - "message": "insufficient funds for gas * price + value: address 0x7898bcf2d41a713086c69e0bb69cae6934430792 have 44356203162035406 want 581791671697841985" - }, - "id": 1, - "jsonrpc": "2.0" - } + { + "test": { + "reference": "https://etherscan.io/tx/0x67b7da808974bffe636e6c969cbdcb600f1196cdf06e2faba981414fc2511da2 on block https://etherscan.io/block/17999744", + "description": "tx post EIP-1559 on block post EIP-1559" + }, + "request": { + "jsonrpc": "2.0", + "method": "eth_callBundle", + "params": [ + [ + "0x67b7da808974bffe636e6c969cbdcb600f1196cdf06e2faba981414fc2511da2" + ], + "0x112A780", + 50000 + ], + "id": 1 + }, + "response": { + "error": { + "code": -32000, + "message": "insufficient funds 
for gas * price + value: address 0x7898bcf2D41a713086C69E0bb69cAe6934430792 have 44356203162035406 want 581791671697841985" + }, + "id": 1, + "jsonrpc": "2.0" + } } -] - +] \ No newline at end of file diff --git a/integration/mainnet/eth_callBundle/test_08.json b/integration/mainnet/eth_callBundle/test_08.json index 008a800f..d201dfe3 100644 --- a/integration/mainnet/eth_callBundle/test_08.json +++ b/integration/mainnet/eth_callBundle/test_08.json @@ -1,24 +1,28 @@ [ - { - "test": { - "reference": "https://etherscan.io/tx/0x5b520840fef569738e1b54589ff90fe6858076e8a582732a98fa7d0bd4c2b9a6 on pre EIP1559 block", - "description": "tx post EIP1559 on block pre 1559" - }, - - "request": { - "jsonrpc": "2.0", - "method": "eth_callBundle", - "params": [["0x5b520840fef569738e1b54589ff90fe6858076e8a582732a98fa7d0bd4c2b9a6"], "0x10908", 50000], - "id": 1 - }, - "response": { - "id": 1, - "jsonrpc": "2.0", - "error": { - "code": -32000, - "message": "eip-1559 transactions require london" - } - } + { + "test": { + "reference": "https://etherscan.io/tx/0x5b520840fef569738e1b54589ff90fe6858076e8a582732a98fa7d0bd4c2b9a6 on pre EIP1559 block", + "description": "tx post EIP1559 on block pre 1559" + }, + "request": { + "jsonrpc": "2.0", + "method": "eth_callBundle", + "params": [ + [ + "0x5b520840fef569738e1b54589ff90fe6858076e8a582732a98fa7d0bd4c2b9a6" + ], + "0x10908", + 50000 + ], + "id": 1 + }, + "response": { + "error": { + "code": -32000, + "message": "eip-1559 transactions require London" + }, + "id": 1, + "jsonrpc": "2.0" + } } -] - +] \ No newline at end of file diff --git a/integration/mainnet/eth_callBundle/test_11.json b/integration/mainnet/eth_callBundle/test_11.json index 729fc466..5536c220 100644 --- a/integration/mainnet/eth_callBundle/test_11.json +++ b/integration/mainnet/eth_callBundle/test_11.json @@ -1,23 +1,28 @@ [ - { - "test": { - "reference": "https://etherscan.io/tx/0xae3f0896814b8598decc725788b40ec3c0f5f56adbdcb443b35669db23fe3148 on block 
https://etherscan.io/block/10000000", - "description": "tx pre EIP-1559 on block post EIP-1559" - }, - "request": { - "jsonrpc": "2.0", - "method": "eth_callBundle", - "params": [["0xae3f0896814b8598decc725788b40ec3c0f5f56adbdcb443b35669db23fe3148"], "0x112A880", 50000], - "id": 1 - }, - "response": { - "id": 1, - "jsonrpc": "2.0", - "error": { - "code": -32000, - "message": "insufficient funds for gas * price + value: address 0x263e47386006bc8e84872b8976dd4675da640bf6 have 1481561298817362 want 40061769000000000" - } - } + { + "test": { + "reference": "https://etherscan.io/tx/0xae3f0896814b8598decc725788b40ec3c0f5f56adbdcb443b35669db23fe3148 on block https://etherscan.io/block/10000000", + "description": "tx pre EIP-1559 on block post EIP-1559" + }, + "request": { + "jsonrpc": "2.0", + "method": "eth_callBundle", + "params": [ + [ + "0xae3f0896814b8598decc725788b40ec3c0f5f56adbdcb443b35669db23fe3148" + ], + "0x112A880", + 50000 + ], + "id": 1 + }, + "response": { + "error": { + "code": -32000, + "message": "insufficient funds for gas * price + value: address 0x263E47386006BC8e84872b8976DD4675da640bf6 have 1481561298817362 want 40061769000000000" + }, + "id": 1, + "jsonrpc": "2.0" + } } -] - +] \ No newline at end of file diff --git a/integration/mainnet/eth_callBundle/test_13.json b/integration/mainnet/eth_callBundle/test_13.json index aafb8417..36696990 100644 --- a/integration/mainnet/eth_callBundle/test_13.json +++ b/integration/mainnet/eth_callBundle/test_13.json @@ -1,24 +1,28 @@ [ - { - "test": { - "reference": "https://etherscan.io/tx/0x5a4bf6970980a9381e6d6c78d96ab278035bbff58c383ffe96a0a2bbc7c02a4b, on https://etherscan.io/block/10000000", - "description": "transaction pre EIP-1559 with block pre-EIP-1559" - }, - - "request": { - "jsonrpc": "2.0", - "method": "eth_callBundle", - "params": [["0x5a4bf6970980a9381e6d6c78d96ab278035bbff58c383ffe96a0a2bbc7c02a4b"], "0x989680", 50000], - "id": 1 - }, - "response": { - "id": 1, - "jsonrpc": "2.0", - "error": { - 
"code": -32000, - "message": "insufficient funds for gas * price + value: address 0x8a9d69aa686fa0f9bbdec21294f67d4d9cfb4a3e have 1392684180000000000 want 2000126000000000000" - } - } + { + "test": { + "reference": "https://etherscan.io/tx/0x5a4bf6970980a9381e6d6c78d96ab278035bbff58c383ffe96a0a2bbc7c02a4b, on https://etherscan.io/block/10000000", + "description": "transaction pre EIP-1559 with block pre-EIP-1559" + }, + "request": { + "jsonrpc": "2.0", + "method": "eth_callBundle", + "params": [ + [ + "0x5a4bf6970980a9381e6d6c78d96ab278035bbff58c383ffe96a0a2bbc7c02a4b" + ], + "0x989680", + 50000 + ], + "id": 1 + }, + "response": { + "error": { + "code": -32000, + "message": "insufficient funds for gas * price + value: address 0x8A9d69Aa686fA0f9BbDec21294F67D4D9CFb4A3E have 1392684180000000000 want 2000126000000000000" + }, + "id": 1, + "jsonrpc": "2.0" + } } -] - +] \ No newline at end of file From c2f72946dc08bbc635c18000c0d1b2e93b1c24b8 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sat, 20 Dec 2025 09:26:18 +0100 Subject: [PATCH 31/87] update test --- integration/mainnet/eth_callMany/test_03.json | 4 ++-- integration/mainnet/eth_callMany/test_10.json | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/integration/mainnet/eth_callMany/test_03.json b/integration/mainnet/eth_callMany/test_03.json index c6d1cb9d..f85212df 100644 --- a/integration/mainnet/eth_callMany/test_03.json +++ b/integration/mainnet/eth_callMany/test_03.json @@ -33,10 +33,10 @@ "response": { "error": { "code": -32000, - "message": "insufficient funds for gas * price + value: address 0xb5772b3a6be702d0387770c64bf17e36a4d38188 have 5120 want 1239566" + "message": "insufficient funds for gas * price + value: address 0xB5772B3A6Be702D0387770C64bf17e36a4D38188 have 5120 want 1239566" }, "id": 1, "jsonrpc": "2.0" } } -] +] \ No newline at end of file diff --git a/integration/mainnet/eth_callMany/test_10.json 
b/integration/mainnet/eth_callMany/test_10.json index 251e2a1c..577f9aee 100644 --- a/integration/mainnet/eth_callMany/test_10.json +++ b/integration/mainnet/eth_callMany/test_10.json @@ -34,10 +34,10 @@ "response": { "error": { "code": -32000, - "message": "insufficient funds for gas * price + value: address 0x1c62626e5f265d35fffa84a40767810a3ffc139c have 0 want 11893600000000" + "message": "insufficient funds for gas * price + value: address 0x1c62626E5f265D35ffFA84a40767810A3FfC139c have 0 want 11893600000000" }, "id": 1, "jsonrpc": "2.0" } } -] +] \ No newline at end of file From e3a4308d9de684dfd3d481699be14e490b356174 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sat, 20 Dec 2025 09:29:48 +0100 Subject: [PATCH 32/87] remove used struct and method --- cmd/integration/main.go | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 3d309d5d..917e9cc0 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -51,10 +51,6 @@ var ( "mainnet/engine_", } - testsNotCompared = []string{} - testsNotComparedMessage = []string{} - testsNotComparedError = []string{} - // testsOnLatest - add your list here testsOnLatest = []string{ "mainnet/debug_traceBlockByNumber/test_24.json", @@ -837,11 +833,6 @@ func isSkipped(currAPI, testName string, globalTestNumber int, config *Config) b return true } } - for _, currTest := range testsNotCompared { - if strings.Contains(apiFullTestName, currTest) { - return true - } - } } if config.ExcludeAPIList != "" { @@ -921,26 +912,6 @@ func apiUnderTest(currAPI, testName string, config *Config) bool { return false } -func isNotComparedMessage(testName, net string) bool { - testFullName := net + "/" + testName - for _, currTestName := range testsNotComparedMessage { - if currTestName == testFullName { - return true - } - } - return false -} - -func isNotComparedError(testName, net string) bool { - 
testFullName := net + "/" + testName - for _, currTestName := range testsNotComparedError { - if currTestName == testFullName { - return true - } - } - return false -} - func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response, expectedResponse []byte) error { if !dumpJSON { return nil From a1d6280e92850a794d6c9f017f9be3cb5176ed26 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sat, 20 Dec 2025 09:37:42 +0100 Subject: [PATCH 33/87] remove unused --- cmd/integration/main.go | 34 ---------------------------------- 1 file changed, 34 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 917e9cc0..e5079e2f 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -768,40 +768,6 @@ func generateJWTSecret(filename string, length int) error { return nil } -func toLowerCase(inputFile, outputFile string) error { - inputContent, err := os.ReadFile(inputFile) - if err != nil { - return err - } - - outputContent := []byte(strings.ToLower(string(inputContent))) - - err = os.WriteFile(outputFile, outputContent, 0644) - if err != nil { - return err - } - return nil -} - -func replaceMessage(inputFile, outputFile, matchedString string) error { - inData, err := os.ReadFile(inputFile) - if err != nil { - return err - } - - lines := strings.Split(string(inData), "\n") - var output []string - for _, line := range lines { - if !strings.Contains(line, matchedString) { - output = append(output, line) - } else { - output = append(output, " \"message\": \"\"") - } - } - - return os.WriteFile(outputFile, []byte(strings.Join(output, "\n")), 0644) -} - func extractNumber(filename string) int { re := regexp.MustCompile(`\d+`) match := re.FindString(filename) From 58cf4053afb67ed9a992e23a043eb68196cae60d Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sat, 20 Dec 2025 12:10:34 +0100 Subject: [PATCH 34/87] add metafata --- 
integration/mainnet/eth_createAccessList/test_18.json | 7 ++++++- integration/mainnet/eth_createAccessList/test_19.json | 7 ++++++- integration/mainnet/eth_createAccessList/test_20.json | 7 ++++++- integration/mainnet/eth_createAccessList/test_22.json | 7 ++++++- 4 files changed, 24 insertions(+), 4 deletions(-) diff --git a/integration/mainnet/eth_createAccessList/test_18.json b/integration/mainnet/eth_createAccessList/test_18.json index 8098d31e..7193bf20 100644 --- a/integration/mainnet/eth_createAccessList/test_18.json +++ b/integration/mainnet/eth_createAccessList/test_18.json @@ -2,7 +2,12 @@ { "test": { "reference": "https://etherscan.io/tx/0x4ee2bbfbaf90afef640c6ddacde696e6c4b81f9f644fc8c78670b910f922236b", - "description": "with contract deploy and accessList with to" + "description": "with contract deploy and accessList with to", + "metadata": { + "response": { + "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": ["SET"]}] + } + } }, "request": { "jsonrpc":"2.0", diff --git a/integration/mainnet/eth_createAccessList/test_19.json b/integration/mainnet/eth_createAccessList/test_19.json index 1ca91b9b..51b4f356 100644 --- a/integration/mainnet/eth_createAccessList/test_19.json +++ b/integration/mainnet/eth_createAccessList/test_19.json @@ -2,7 +2,12 @@ { "test": { "reference": "https://etherscan.io/tx/0x3d63e943b1d979968db3da08a9a0d355f71af46ec3481ce313afbad733f77776", - "description": "1 access list entry" + "description": "1 access list entry", + "metadata": { + "response": { + "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": ["SET"]}] + } + } }, "request": { "jsonrpc":"2.0", diff --git a/integration/mainnet/eth_createAccessList/test_20.json b/integration/mainnet/eth_createAccessList/test_20.json index 94b37f71..38ed1532 100644 --- a/integration/mainnet/eth_createAccessList/test_20.json +++ b/integration/mainnet/eth_createAccessList/test_20.json @@ -2,7 +2,12 @@ { "test": { "reference": 
"https://etherscan.io/tx/0x3d63e943b1d979968db3da08a9a0d355f71af46ec3481ce313afbad733f77776", - "description": "1 access list entry" + "description": "1 access list entry", + "metadata": { + "response": { + "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": ["SET"]}] + } + } }, "request": { "jsonrpc":"2.0", diff --git a/integration/mainnet/eth_createAccessList/test_22.json b/integration/mainnet/eth_createAccessList/test_22.json index 30867536..849ae1e7 100644 --- a/integration/mainnet/eth_createAccessList/test_22.json +++ b/integration/mainnet/eth_createAccessList/test_22.json @@ -2,7 +2,12 @@ { "test": { "reference": "", - "description": "access list with state Override" + "description": "access list with state Override", + "metadata": { + "response": { + "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": ["SET"]}] + } + } }, "request": { "jsonrpc":"2.0", From 67d414aa4ad2c6a83f81f11e4f31fa57fa867af2 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Sun, 21 Dec 2025 09:49:03 +0100 Subject: [PATCH 35/87] use buffered I/O when reading JSON commands introduce json-iterator remove unused code --- cmd/integration/main.go | 87 ++++++++++++++--------------------------- go.mod | 3 ++ go.sum | 10 +++++ 3 files changed, 43 insertions(+), 57 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index e5079e2f..a57b2f68 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -2,13 +2,13 @@ package main import ( "archive/tar" + "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "crypto/rand" "encoding/hex" - "encoding/json" "errors" "flag" "fmt" @@ -32,8 +32,11 @@ import ( "github.com/golang-jwt/jwt/v5" "github.com/gorilla/websocket" "github.com/josephburnett/jd/v2" + jsoniter "github.com/json-iterator/go" ) +var json = jsoniter.ConfigCompatibleWithStandardLibrary + const ( DaemonOnOtherPort = "other-daemon" DaemonOnDefaultPort = "rpcdaemon" @@ -383,7 
+386,7 @@ type TestDescriptor struct { } type JsonRpcResponseMetadata struct { - PathOptions json.RawMessage `json:"pathOptions"` + PathOptions jsoniter.RawMessage `json:"pathOptions"` } type JsonRpcTestMetadata struct { @@ -399,9 +402,9 @@ type JsonRpcTest struct { } type JsonRpcCommand struct { - Request json.RawMessage `json:"request"` - Response json.RawMessage `json:"response"` - TestInfo *JsonRpcTest `json:"test"` + Request jsoniter.RawMessage `json:"request"` + Response jsoniter.RawMessage `json:"response"` + TestInfo *JsonRpcTest `json:"test"` } func NewConfig() *Config { @@ -1237,42 +1240,6 @@ func runCompare(jsonDiff bool, errorFile, tempFile1, tempFile2, diffFile string) } } -func copyFile(src, dst string) (int64, error) { - sourceFileStat, err := os.Stat(src) - if err != nil { - return 0, err - } - - if !sourceFileStat.Mode().IsRegular() { - return 0, fmt.Errorf("%s is not a regular file", src) - } - - source, err := os.Open(src) - if err != nil { - return 0, err - } - defer func(source *os.File) { - err := source.Close() - if err != nil { - fmt.Printf("failed to close source file: %v\n", err) - } - }(source) - - destination, err := os.Create(dst) - if err != nil { - return 0, err - } - defer func(destination *os.File) { - err := destination.Close() - if err != nil { - fmt.Printf("failed to close destination file: %v\n", err) - } - }(destination) - - nBytes, err := io.Copy(destination, source) - return nBytes, err -} - var ( errDiffTimeout = errors.New("diff timeout") errDiffMismatch = errors.New("diff mismatch") @@ -1284,14 +1251,23 @@ func isArchive(jsonFilename string) bool { } func extractJsonCommands(jsonFilename string, metrics *TestMetrics) ([]JsonRpcCommand, error) { - var jsonrpcCommands []JsonRpcCommand - data, err := os.ReadFile(jsonFilename) + file, err := os.Open(jsonFilename) if err != nil { - return jsonrpcCommands, errors.New("cannot read file " + jsonFilename + ": " + err.Error()) + return nil, fmt.Errorf("cannot open file %s: %w", 
jsonFilename, err) } + defer func(file *os.File) { + err = file.Close() + if err != nil { + fmt.Printf("failed to close file %s: %v\n", jsonFilename, err) + } + }(file) + + reader := bufio.NewReaderSize(file, 8*os.Getpagesize()) + + var jsonrpcCommands []JsonRpcCommand start := time.Now() - if err := json.Unmarshal(data, &jsonrpcCommands); err != nil { - return jsonrpcCommands, errors.New("cannot parse JSON " + jsonFilename + ": " + err.Error()) + if err := json.NewDecoder(reader).Decode(&jsonrpcCommands); err != nil { + return nil, fmt.Errorf("cannot parse JSON %s: %w", jsonFilename, err) } metrics.UnmarshallingTime += time.Since(start) return jsonrpcCommands, nil @@ -1343,10 +1319,10 @@ func (c *JsonRpcCommand) compareJSONFiles(kind JsonDiffKind, errorFileName, file } } -func (c *JsonRpcCommand) compareJSON(config *Config, response interface{}, jsonFile, daemonFile, expRspFile, diffFile string, testNumber int, metrics *TestMetrics) (bool, error) { - diffFileSize := int64(0) +func (c *JsonRpcCommand) compareJSON(config *Config, daemonFile, expRspFile, diffFile string, metrics *TestMetrics) (bool, error) { metrics.ComparisonCount += 1 + diffFileSize := int64(0) diffResult, err := c.compareJSONFiles(config.DiffKind, "/dev/null", expRspFile, daemonFile, diffFile) if diffResult { fileInfo, err := os.Stat(diffFile) @@ -1368,10 +1344,7 @@ func (c *JsonRpcCommand) compareJSON(config *Config, response interface{}, jsonF return true, nil } -func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byte, config *Config, outputDir, daemonFile, expRspFile, diffFile string, descriptor *TestDescriptor, outcome *TestOutcome) { - jsonFile := descriptor.Name - testNumber := descriptor.Number - +func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byte, config *Config, outputDir, daemonFile, expRspFile, diffFile string, outcome *TestOutcome) { var expectedResponse []byte if result1 != nil { expectedResponse = result1 @@ -1392,7 +1365,7 @@ 
func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt var responseMap map[string]interface{} var respIsMap bool start := time.Now() - if err := json.Unmarshal(response, &responseMap); err == nil { + if err := json.NewDecoder(bytes.NewReader(response)).Decode(&responseMap); err == nil { outcome.Metrics.UnmarshallingTime += time.Since(start) respIsMap = true start = time.Now() @@ -1411,7 +1384,7 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt var expectedMap map[string]interface{} var expIsMap bool start = time.Now() - if err := json.Unmarshal(expectedResponse, &expectedMap); err == nil { + if err := json.NewDecoder(bytes.NewReader(expectedResponse)).Decode(&expectedMap); err == nil { outcome.Metrics.UnmarshallingTime += time.Since(start) expIsMap = true start := time.Now() @@ -1491,7 +1464,7 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt return } - same, err := c.compareJSON(config, responseMap, jsonFile, daemonFile, expRspFile, diffFile, testNumber, &outcome.Metrics) + same, err := c.compareJSON(config, daemonFile, expRspFile, diffFile, &outcome.Metrics) if err != nil { outcome.Error = err return @@ -1557,7 +1530,7 @@ func (c *JsonRpcCommand) run(ctx context.Context, config *Config, descriptor *Te daemonFile := outputAPIFilename + "-response.json" expRspFile := outputAPIFilename + "-expResponse.json" - c.processResponse(result, nil, responseInFile, config, outputDirName, daemonFile, expRspFile, diffFile, descriptor, outcome) + c.processResponse(result, nil, responseInFile, config, outputDirName, daemonFile, expRspFile, diffFile, outcome) } else { target = getTarget(DaemonOnDefaultPort, descriptor.Name, config) result, err := executeRequest(ctx, config, transportType, jwtAuth, target, request, &outcome.Metrics) @@ -1589,7 +1562,7 @@ func (c *JsonRpcCommand) run(ctx context.Context, config *Config, descriptor *Te daemonFile := outputAPIFilename + 
getJSONFilenameExt(DaemonOnDefaultPort, target) expRspFile := outputAPIFilename + getJSONFilenameExt(config.DaemonAsReference, target1) - c.processResponse(result, result1, nil, config, outputDirName, daemonFile, expRspFile, diffFile, descriptor, outcome) + c.processResponse(result, result1, nil, config, outputDirName, daemonFile, expRspFile, diffFile, outcome) return } } diff --git a/go.mod b/go.mod index 83827fd4..a6d76045 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,10 @@ require ( github.com/google/go-cmp v0.7.0 // indirect github.com/influxdata/tdigest v0.0.1 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect diff --git a/go.sum b/go.sum index 1f8e465f..fb47df3b 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,7 @@ github.com/bmizerany/perks v0.0.0-20230307044200-03f9df79da1e h1:mWOqoK5jV13ChKf github.com/bmizerany/perks v0.0.0-20230307044200-03f9df79da1e/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q= github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654 h1:XOPLOMn/zT4jIgxfxSsoXPxkrzz0FaCHwp33x5POJ+Q= @@ -18,6 +19,7 @@ github.com/golang-jwt/jwt/v5 v5.3.0/go.mod 
h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArs github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/influxdata/tdigest v0.0.1 h1:XpFptwYmnEKUqmkcDjrzffswZ3nvNeevbUSLPP/ZzIY= @@ -26,6 +28,8 @@ github.com/josephburnett/jd/v2 v2.3.0 h1:AyNT0zSStJ2j28zutWDO4fkc95JoICryWQRmDTR github.com/josephburnett/jd/v2 v2.3.0/go.mod h1:0I5+gbo7y8diuajJjm79AF44eqTheSJy1K7DSbIUFAQ= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -34,6 +38,10 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= @@ -44,6 +52,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/streadway/quantile v0.0.0-20220407130108-4246515d968d h1:X4+kt6zM/OVO6gbJdAfJR60MGPsqCzbtXNnjoGqdfAs= github.com/streadway/quantile v0.0.0-20220407130108-4246515d968d/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tsenart/vegeta/v12 v12.13.0 h1:J/UiNS3f69MkL0tsRLVUUV8uXXQZxdRUchtS+GYiSFc= From da4833f5e781d6d896d44dcfd48e960188d8ed63 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com> Date: Sun, 21 Dec 2025 15:02:57 +0100 Subject: [PATCH 36/87] align to go --- integration/run_tests.py | 141 +++------------------------------------ 1 file changed, 8 insertions(+), 133 deletions(-) diff --git a/integration/run_tests.py b/integration/run_tests.py index c9b9a237..92adc18c 100755 --- a/integration/run_tests.py +++ b/integration/run_tests.py @@ -35,16 +35,6 @@ "mainnet/engine_" # not supported on external EP ] -tests_not_compared = [ -] - -tests_not_compared_message = [ -] - 
-tests_not_compared_error = [ -] - - tests_on_latest = [ "mainnet/debug_traceBlockByNumber/test_24.json", "mainnet/debug_traceBlockByNumber/test_30.json", @@ -258,53 +248,6 @@ def get_jwt_secret(name): return "" -def to_lower_case(file, dest_file): - """ converts input string into lower case - """ - cmd = "tr '[:upper:]' '[:lower:]' < " + file + " > " + dest_file - os.system(cmd) - - -def replace_str_from_file(filer, filew, matched_string): - """ parse file and replace string - """ - with open(filer, "r", encoding='utf8') as input_file: - with open(filew, "w", encoding='utf8') as output_file: - # iterate all lines from file - for line in input_file: - # if text matches then don't write it - if (matched_string in line) == 0: - output_file.write(line) - - -def replace_message(filer, filew, matched_string): - """ parse file and replace string - """ - with open(filer, "r", encoding='utf8') as input_file: - with open(filew, "w", encoding='utf8') as output_file: - # iterate all lines from file - for line in input_file: - # if text matches then don't write it - if (matched_string in line) == 0: - output_file.write(line) - else: - output_file.write(" \"message\": \"\"\n") - - -def modified_str_from_file(filer, filew, matched_string): - """ parse file and convert string - """ - with open(filer, "r", encoding='utf8') as input_file: - with open(filew, "w", encoding='utf8') as output_file: - # iterate all lines from file - for line in input_file: - # if text matches then don't write it - if (matched_string in line) == 1: - output_file.write(line.lower()) - else: - output_file.write(line) - - def is_skipped(curr_api, test_name: str, global_test_number, config): """ determine if test must be skipped """ @@ -380,47 +323,6 @@ def api_under_test(curr_api, test_name, config): return in_latest_list -def is_not_compared_message(test_name, net: str): - """ determine if test not compared message field - """ - test_full_name = net + "/" + test_name - for curr_test_name in 
tests_not_compared_message: - if curr_test_name == test_full_name: - return 1 - return 0 - - -def is_not_compared_error(test_name, net: str): - """ determine if test not compared error field - """ - test_full_name = net + "/" + test_name - for curr_test_name in tests_not_compared_error: - if curr_test_name == test_full_name: - return 1 - return 0 - -def generate_json_report(filename, start_time, elapsed, total_tests, tested_apis, - loops, executed_tests, not_executed_tests, success_tests, - failed_tests, test_results): - """ Generate JSON report with test results """ - report = { - "summary": { - "start_time": start_time.isoformat(), - "time_elapsed": str(elapsed), - "available_tests": total_tests, - "available_tested_api": tested_apis, - "number_of_loops": loops + 1, - "executed_tests": executed_tests, - "not_executed_tests": not_executed_tests, - "success_tests": success_tests, - "failed_tests": failed_tests - }, - "test_results": test_results - } - - with open(filename, 'w', encoding='utf8') as f: - json.dump(report, f, indent=2) - def print_latest_block(server1_url: str, server2_url: str): """ print ltest block number """ @@ -697,6 +599,7 @@ def __init__(self): self.round_trip_time = timedelta(0) self.marshalling_time = timedelta(0) self.unmarshalling_time = timedelta(0) + self.noOfJsonDiffs = 0 class TestOutcome: @@ -704,6 +607,7 @@ def __init__(self, return_code: int = 0, error_msg: str = ''): self.return_code = return_code self.error_msg = error_msg self.metrics = TestMetrics() + self.noOfJsonDiffs = 0 def dump_jsons(dump_json, daemon_file, exp_rsp_file, output_dir, response, expected_response: str, outcome: TestOutcome): @@ -858,32 +762,8 @@ def run_compare(use_jsondiff, error_file, temp_file1, temp_file2, diff_file, tes def compare_json(config, response, json_file, daemon_file, exp_rsp_file, diff_file: str, test_number): """ Compare JSON response. 
""" - base_name = TEMP_DIRNAME + "/test_" + str(test_number) + "/" - if os.path.exists(base_name) == 0: - os.makedirs(base_name, exist_ok=True) - temp_file1 = base_name + "daemon_lower_case.txt" - temp_file2 = base_name + "rpc_lower_case.txt" - error_file = base_name + "ERROR.txt" - - if "error" in response: - to_lower_case(daemon_file, temp_file1) - to_lower_case(exp_rsp_file, temp_file2) - else: - cmd = "cp " + daemon_file + " " + temp_file1 - os.system(cmd) - cmd = "cp " + exp_rsp_file + " " + temp_file2 - os.system(cmd) - - if is_not_compared_message(json_file, config.net): - removed_line_string = "message" - replace_message(exp_rsp_file, temp_file1, removed_line_string) - replace_message(daemon_file, temp_file2, removed_line_string) - elif is_not_compared_error(json_file, config.net): - removed_line_string = "error" - replace_message(exp_rsp_file, temp_file1, removed_line_string) - replace_message(daemon_file, temp_file2, removed_line_string) - - diff_result = run_compare(config.use_jsondiff, error_file, temp_file1, temp_file2, diff_file, test_number) + + diff_result = run_compare(config.use_jsondiff, "/dev/null", daemon_file, exp_rsp_file, diff_file, test_number) diff_file_size = 0 return_code = 1 # ok error_msg = "" @@ -896,15 +776,6 @@ def compare_json(config, response, json_file, daemon_file, exp_rsp_file, diff_fi error_msg = "Failed" return_code = 0 # failed - if os.path.exists(temp_file1): - os.remove(temp_file1) - if os.path.exists(temp_file2): - os.remove(temp_file2) - if os.path.exists(base_name): - try: - shutil.rmtree(base_name) - except OSError: - pass return return_code, error_msg @@ -965,6 +836,7 @@ def process_response(target, target1, result, result1: str, response_in_file, co dump_jsons(True, daemon_file, exp_rsp_file, output_dir, response, expected_response, outcome) same, error_msg = compare_json(config, response, json_file, daemon_file, exp_rsp_file, diff_file, test_number) + outcome.noOfJsonDiffs = 1 # cleanup if same: 
os.remove(daemon_file) @@ -1157,6 +1029,7 @@ def main(argv) -> int: total_round_trip_time = timedelta(0) total_marshalling_time = timedelta(0) total_unmarshalling_time = timedelta(0) + no_of_json_diffs = 0 test_rep = 0 test_results = [] # Store test results for JSON report try: @@ -1257,6 +1130,7 @@ def main(argv) -> int: total_round_trip_time += test_outcome.metrics.round_trip_time total_marshalling_time += test_outcome.metrics.marshalling_time total_unmarshalling_time += test_outcome.metrics.unmarshalling_time + no_of_json_diffs += test_outcome.noOfJsonDiffs if result == 1: success_tests = success_tests + 1 if config.verbose_level: @@ -1304,6 +1178,7 @@ def main(argv) -> int: print(f"Total round_trip time: {str(total_round_trip_time)}") print(f"Total marshalling time: {str(total_marshalling_time)}") print(f"Total unmarshalling time: {str(total_unmarshalling_time)}") + print(f"No of json Diffs: {str(no_of_json_diffs)}") print(f"Test time-elapsed: {str(elapsed)}") print(f"Available tests: {global_test_number - 1}") print(f"Available tested api: {available_tested_apis}") From c9dc4ab09ee2c3ceede134c5b4b2c60f87f0eeb3 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sun, 21 Dec 2025 16:11:55 +0100 Subject: [PATCH 37/87] verify if servers works on same latest block --- cmd/integration/main.go | 80 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index a57b2f68..6c72f575 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1096,6 +1096,75 @@ func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtA return body, nil } + +ype RPCRequest struct { + Jsonrpc string `json:"jsonrpc"` + Method string `json:"method"` + Params []interface{} `json:"params"` + ID int `json:"id"` +} + +type RPCResponse struct { + Result string `json:"result"` + Error *struct { + Message string `json:"message"` + } `json:"error"` 
+} + +func getBlockNumber(ctx context.Context, config *Config, url string, metrics *TestMetrics) (uint64, error) { + payload := RPCRequest{ + Jsonrpc: "2.0", + Method: "eth_blockNumber", + Params: []interface{}{}, + ID: 1, + } + requestBytes, _ := json.Marshal(payload) + + responseBytes, err := executeHttpRequest(ctx, config, "http", "", url, requestBytes, metrics) + if err != nil { + return 0, err + } + + var rpcResp RPCResponse + if err := json.Unmarshal(responseBytes, &rpcResp); err != nil { + return 0, fmt.Errorf("error decoding json: %w", err) + } + + if rpcResp.Error != nil { + return 0, fmt.Errorf("RPC error: %s", rpcResp.Error.Message) + } + + cleanHex := strings.TrimPrefix(rpcResp.Result, "0x") + return strconv.ParseUint(cleanHex, 16, 64) +} + +func GetConsistentBlockNumber(config *Config, server1URL, server2URL string, maxRetries int, retryDelayMs int) *uint64 { + var bn1, bn2 uint64 + delay := time.Duration(retryDelayMs) * time.Millisecond + + metrics := TestMetrics{} + for i := 0; i < maxRetries; i++ { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + + var err1, err2 error + bn1, err1 = getBlockNumber(ctx, config, server1URL, &metrics) + bn2, err2 = getBlockNumber(ctx, config, server2URL, &metrics) + cancel() + + if err1 == nil && err2 == nil && bn1 == bn2 { + fmt.Printf("INFO: Nodi sincronizzati (Tentativo %d): %d\n", i+1, bn1) + return &bn1 + } + + if i < maxRetries-1 { + time.Sleep(delay) + } + } + + fmt.Printf("ERROR: Nodi non sincronizzati o errori di rete. 
Ultimi valori: %d / %d\n", bn1, bn2) + return nil +} + func executeWebSocketRequest(config *Config, transportType, jwtAuth, target string, request []byte, metrics *TestMetrics) ([]byte, error) { wsTarget := "ws://" + target dialer := websocket.Dialer{ @@ -1776,6 +1845,17 @@ func runMain() int { fmt.Println("Run tests using compression") } + if config.VerifyWithDaemon && config.TestsOnLatestBlock { + var server1 = fmt.Sprintf("%s:%d", config.DaemonOnHost, config.ServerPort) + var maxRetries = 10 + var retryDelayMs = 1000 + var consistent_block = GetConsistentBlockNumber(config, server1, config.ExternalProviderURL, maxRetries, retryDelayMs) + if consistent_block == nil { + fmt.Printf("ERROR: Tests on latest block: two servers are not synchronized") + return 1 + } + } + resultsAbsoluteDir, err := filepath.Abs(config.ResultsDir) if err != nil { return -1 From aed3d9cd6c450d52c19c6760a8885977281580f3 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sun, 21 Dec 2025 18:09:05 +0100 Subject: [PATCH 38/87] remove attempt in dumpJSON & gofmt --- cmd/integration/main.go | 173 ++++++++++++++++++---------------------- 1 file changed, 77 insertions(+), 96 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 6c72f575..c5c033a4 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -886,38 +886,20 @@ func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response return nil } - for attempt := 0; attempt < 10; attempt++ { - if err := os.MkdirAll(outputDir, 0755); err != nil { - fmt.Printf("Exception on makedirs: %s %v\n", outputDir, err) - continue - } + if err := os.MkdirAll(outputDir, 0755); err != nil { + return fmt.Errorf("Exception on makedirs: %s %v\n", outputDir, err) + } - if daemonFile != "" { - if _, err := os.Stat(daemonFile); err == nil { - err := os.Remove(daemonFile) - if err != nil { - return err - } - } - if err := os.WriteFile(daemonFile, response, 0644); err != 
nil { - fmt.Printf("Exception on file write daemon: %v attempt %d\n", err, attempt) - continue - } + if daemonFile != "" { + if err := os.WriteFile(daemonFile, response, 0644); err != nil { + return fmt.Errorf("Exception on file write daemon: %v\n", err) } + } - if expRspFile != "" { - if _, err := os.Stat(expRspFile); err == nil { - err := os.Remove(expRspFile) - if err != nil { - return err - } - } - if err := os.WriteFile(expRspFile, expectedResponse, 0644); err != nil { - fmt.Printf("Exception on file write expected: %v attempt %d\n", err, attempt) - continue - } + if expRspFile != "" { + if err := os.WriteFile(expRspFile, expectedResponse, 0644); err != nil { + return fmt.Errorf("Exception on file write expected: %v\n", err) } - break } return nil } @@ -1096,73 +1078,72 @@ func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtA return body, nil } - -ype RPCRequest struct { - Jsonrpc string `json:"jsonrpc"` - Method string `json:"method"` - Params []interface{} `json:"params"` - ID int `json:"id"` +type RPCRequest struct { + Jsonrpc string `json:"jsonrpc"` + Method string `json:"method"` + Params []interface{} `json:"params"` + ID int `json:"id"` } type RPCResponse struct { - Result string `json:"result"` - Error *struct { - Message string `json:"message"` - } `json:"error"` + Result string `json:"result"` + Error *struct { + Message string `json:"message"` + } `json:"error"` } func getBlockNumber(ctx context.Context, config *Config, url string, metrics *TestMetrics) (uint64, error) { - payload := RPCRequest{ - Jsonrpc: "2.0", - Method: "eth_blockNumber", - Params: []interface{}{}, - ID: 1, - } - requestBytes, _ := json.Marshal(payload) - - responseBytes, err := executeHttpRequest(ctx, config, "http", "", url, requestBytes, metrics) - if err != nil { - return 0, err - } - - var rpcResp RPCResponse - if err := json.Unmarshal(responseBytes, &rpcResp); err != nil { - return 0, fmt.Errorf("error decoding json: %w", err) - } - - if 
rpcResp.Error != nil { - return 0, fmt.Errorf("RPC error: %s", rpcResp.Error.Message) - } - - cleanHex := strings.TrimPrefix(rpcResp.Result, "0x") - return strconv.ParseUint(cleanHex, 16, 64) + payload := RPCRequest{ + Jsonrpc: "2.0", + Method: "eth_blockNumber", + Params: []interface{}{}, + ID: 1, + } + requestBytes, _ := json.Marshal(payload) + + responseBytes, err := executeHttpRequest(ctx, config, "http", "", url, requestBytes, metrics) + if err != nil { + return 0, err + } + + var rpcResp RPCResponse + if err := json.Unmarshal(responseBytes, &rpcResp); err != nil { + return 0, fmt.Errorf("error decoding json: %w", err) + } + + if rpcResp.Error != nil { + return 0, fmt.Errorf("RPC error: %s", rpcResp.Error.Message) + } + + cleanHex := strings.TrimPrefix(rpcResp.Result, "0x") + return strconv.ParseUint(cleanHex, 16, 64) } func GetConsistentBlockNumber(config *Config, server1URL, server2URL string, maxRetries int, retryDelayMs int) *uint64 { - var bn1, bn2 uint64 - delay := time.Duration(retryDelayMs) * time.Millisecond - - metrics := TestMetrics{} - for i := 0; i < maxRetries; i++ { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - - var err1, err2 error - bn1, err1 = getBlockNumber(ctx, config, server1URL, &metrics) - bn2, err2 = getBlockNumber(ctx, config, server2URL, &metrics) - cancel() - - if err1 == nil && err2 == nil && bn1 == bn2 { - fmt.Printf("INFO: Nodi sincronizzati (Tentativo %d): %d\n", i+1, bn1) - return &bn1 - } - - if i < maxRetries-1 { - time.Sleep(delay) - } - } - - fmt.Printf("ERROR: Nodi non sincronizzati o errori di rete. 
Ultimi valori: %d / %d\n", bn1, bn2) - return nil + var bn1, bn2 uint64 + delay := time.Duration(retryDelayMs) * time.Millisecond + + metrics := TestMetrics{} + for i := 0; i < maxRetries; i++ { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + + var err1, err2 error + bn1, err1 = getBlockNumber(ctx, config, server1URL, &metrics) + bn2, err2 = getBlockNumber(ctx, config, server2URL, &metrics) + cancel() + + if err1 == nil && err2 == nil && bn1 == bn2 { + fmt.Printf("INFO: Nodi sincronizzati (Tentativo %d): %d\n", i+1, bn1) + return &bn1 + } + + if i < maxRetries-1 { + time.Sleep(delay) + } + } + + fmt.Printf("ERROR: Nodi non sincronizzati o errori di rete. Ultimi valori: %d / %d\n", bn1, bn2) + return nil } func executeWebSocketRequest(config *Config, transportType, jwtAuth, target string, request []byte, metrics *TestMetrics) ([]byte, error) { @@ -1845,16 +1826,16 @@ func runMain() int { fmt.Println("Run tests using compression") } - if config.VerifyWithDaemon && config.TestsOnLatestBlock { - var server1 = fmt.Sprintf("%s:%d", config.DaemonOnHost, config.ServerPort) - var maxRetries = 10 - var retryDelayMs = 1000 - var consistent_block = GetConsistentBlockNumber(config, server1, config.ExternalProviderURL, maxRetries, retryDelayMs) - if consistent_block == nil { - fmt.Printf("ERROR: Tests on latest block: two servers are not synchronized") - return 1 - } - } + if config.VerifyWithDaemon && config.TestsOnLatestBlock { + var server1 = fmt.Sprintf("%s:%d", config.DaemonOnHost, config.ServerPort) + var maxRetries = 10 + var retryDelayMs = 1000 + var consistent_block = GetConsistentBlockNumber(config, server1, config.ExternalProviderURL, maxRetries, retryDelayMs) + if consistent_block == nil { + fmt.Printf("ERROR: Tests on latest block: two servers are not synchronized") + return 1 + } + } resultsAbsoluteDir, err := filepath.Abs(config.ResultsDir) if err != nil { From 9ec4279e1e9806b46730111f583e0e14b25b7e94 Mon Sep 17 00:00:00 2001 From: 
lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sun, 21 Dec 2025 18:21:41 +0100 Subject: [PATCH 39/87] add ident --- cmd/integration/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index c5c033a4..bbf82112 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1419,7 +1419,7 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt outcome.Metrics.UnmarshallingTime += time.Since(start) respIsMap = true start = time.Now() - response, err = json.Marshal(responseMap) + response, err = json.MarshalIndent(responseMap, "", " ") if err != nil { outcome.Error = err return @@ -1438,7 +1438,7 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt outcome.Metrics.UnmarshallingTime += time.Since(start) expIsMap = true start := time.Now() - expectedResponse, err = json.Marshal(expectedMap) + expectedResponse, err = json.MarshalIndent(expectedMap, "", " ") if err != nil { outcome.Error = err return From b0ef38a203b393bfcc30d833cd9bd480d5474590 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sun, 21 Dec 2025 18:38:12 +0100 Subject: [PATCH 40/87] run jd on gorounte --- cmd/integration/main.go | 85 +++++++++++++++++++++++++++++------------ 1 file changed, 61 insertions(+), 24 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index bbf82112..e732508a 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1326,46 +1326,83 @@ func extractJsonCommands(jsonFilename string, metrics *TestMetrics) ([]JsonRpcCo func (c *JsonRpcCommand) compareJSONFiles(kind JsonDiffKind, errorFileName, fileName1, fileName2, diffFileName string) (bool, error) { switch kind { case JdLibrary: - jsonNode1, err := jd.ReadJsonFile(fileName1) - if err != nil { - return false, err + if success, err := c.runCompareJD(fileName1, fileName2, diffFileName); !success { + 
return false, fmt.Errorf("failed to compare %s and %s using jd command %s", fileName1, fileName2, err) } - jsonNode2, err := jd.ReadJsonFile(fileName2) - if err != nil { - return false, err + return true, nil + case JsonDiffTool: + if success := runCompare(true, errorFileName, fileName1, fileName2, diffFileName); !success { + return false, fmt.Errorf("failed to compare %s and %s using json-diff command", fileName1, fileName2) + } + return true, nil + case DiffTool: + if success := runCompare(false, errorFileName, fileName1, fileName2, diffFileName); !success { + return false, fmt.Errorf("failed to compare %s and %s using diff command", fileName1, fileName2) } - var diff jd.Diff - // Check if the test contains any response metadata with custom options for JSON diff + return true, nil + default: + return false, fmt.Errorf("unknown JSON diff kind: %d", kind) + } +} + +func (c *JsonRpcCommand) runCompareJD(fileName1, fileName2, diffFileName string) (bool, error) { + jsonNode1, err := jd.ReadJsonFile(fileName1) + if err != nil { + return false, err + } + jsonNode2, err := jd.ReadJsonFile(fileName2) + if err != nil { + return false, err + } + + type result struct { + diff jd.Diff + err error + } + + resChan := make(chan result, 1) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + go func() { + var d jd.Diff + var e error + if c.TestInfo != nil && c.TestInfo.Metadata != nil && c.TestInfo.Metadata.Response != nil { if c.TestInfo.Metadata.Response.PathOptions != nil { pathOptions := c.TestInfo.Metadata.Response.PathOptions options, err := jd.ReadOptionsString(string(pathOptions)) if err != nil { - return false, err + resChan <- result{err: err} + return } - diff = jsonNode1.Diff(jsonNode2, options...) + d = jsonNode1.Diff(jsonNode2, options...) 
+ } else { + d = jsonNode1.Diff(jsonNode2) } } else { - diff = jsonNode1.Diff(jsonNode2) + d = jsonNode1.Diff(jsonNode2) } - diffString := diff.Render() + + resChan <- result{diff: d, err: e} + }() + + select { + case <-ctx.Done(): + return false, fmt.Errorf("JSON diff (JD) timeout: operation exceeded timeout for files %s and %s", fileName1, fileName2) + + case res := <-resChan: + if res.err != nil { + return false, res.err + } + + diffString := res.diff.Render() err = os.WriteFile(diffFileName, []byte(diffString), 0644) if err != nil { return false, err } return true, nil - case JsonDiffTool: - if success := runCompare(true, errorFileName, fileName1, fileName2, diffFileName); !success { - return false, fmt.Errorf("failed to compare %s and %s using json-diff command", fileName1, fileName2) - } - return true, nil - case DiffTool: - if success := runCompare(false, errorFileName, fileName1, fileName2, diffFileName); !success { - return false, fmt.Errorf("failed to compare %s and %s using diff command", fileName1, fileName2) - } - return true, nil - default: - return false, fmt.Errorf("unknown JSON diff kind: %d", kind) } } From 559bcbdb510b0a23e1a6364b3934c84a2226d580 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Tue, 23 Dec 2025 08:19:55 +0100 Subject: [PATCH 41/87] remove reference to silk and few erigon remove option -m --- cmd/perf/main.go | 182 ++++++++++------------------------------------- 1 file changed, 36 insertions(+), 146 deletions(-) diff --git a/cmd/perf/main.go b/cmd/perf/main.go index 0c65944f..6fc16cc5 100644 --- a/cmd/perf/main.go +++ b/cmd/perf/main.go @@ -32,29 +32,24 @@ const ( DefaultVegetaPatternTarFile = "" DefaultDaemonVegetaOnCore = "-:-" DefaultErigonBuildDir = "" - DefaultSilkwormBuildDir = "" DefaultErigonAddress = "localhost" - DefaultTestMode = "3" DefaultWaitingTime = 5 DefaultMaxConn = "9000" DefaultTestType = "eth_getLogs" DefaultVegetaResponseTimeout = "300s" DefaultMaxBodyRsp = "1500" - 
Silkworm = "silkworm" - Erigon = "rpcdaemon" - BinaryDir = "bin" - SilkwormServerName = "rpcdaemon" - ErigonServerName = "rpcdaemon" + Erigon = "rpcdaemon" + BinaryDir = "bin" + ErigonServerName = "rpcdaemon" ) var ( - RunTestDirname string - VegetaPatternDirname string - VegetaReport string - VegetaTarFileName string - VegetaPatternSilkwormBase string - VegetaPatternErigonBase string + RunTestDirname string + VegetaPatternDirname string + VegetaReport string + VegetaTarFileName string + VegetaPatternErigonBase string ) func init() { @@ -64,7 +59,6 @@ func init() { VegetaPatternDirname = RunTestDirname + "/erigon_stress_test" VegetaReport = RunTestDirname + "/vegeta_report.hrd" VegetaTarFileName = RunTestDirname + "/vegeta_TAR_File" - VegetaPatternSilkwormBase = VegetaPatternDirname + "/vegeta_geth_" VegetaPatternErigonBase = VegetaPatternDirname + "/vegeta_erigon_" } @@ -72,12 +66,9 @@ func init() { type Config struct { VegetaPatternTarFile string DaemonVegetaOnCore string - ErigonDir string - SilkwormDir string Repetitions int TestSequence string RPCDaemonAddress string - TestMode string TestType string TestingDaemon string WaitingTime int @@ -105,12 +96,9 @@ func NewConfig() *Config { return &Config{ VegetaPatternTarFile: DefaultVegetaPatternTarFile, DaemonVegetaOnCore: DefaultDaemonVegetaOnCore, - ErigonDir: DefaultErigonBuildDir, - SilkwormDir: DefaultSilkwormBuildDir, Repetitions: DefaultRepetitions, TestSequence: DefaultTestSequence, RPCDaemonAddress: DefaultErigonAddress, - TestMode: DefaultTestMode, TestType: DefaultTestType, TestingDaemon: "", WaitingTime: DefaultWaitingTime, @@ -136,23 +124,10 @@ func NewConfig() *Config { // Validate checks the configuration for conflicts and invalid values func (c *Config) Validate() error { - if c.JSONReportFile != "" && c.TestMode == "3" { - return fmt.Errorf("incompatible option json-report with test-mode=3") - } - - if c.TestMode == "3" && c.TestingDaemon != "" { - return fmt.Errorf("incompatible option test-mode=3 
and testing-daemon") - } - if c.JSONReportFile != "" && c.TestingDaemon == "" { return fmt.Errorf("with json-report must also set testing-daemon") } - if (c.ErigonDir != DefaultErigonBuildDir || c.SilkwormDir != DefaultSilkwormBuildDir) && - c.RPCDaemonAddress != DefaultErigonAddress { - return fmt.Errorf("incompatible option rpc-daemon-address with erigon-dir/silk-dir") - } - if c.EmptyCache { currentUser, err := user.Current() if err != nil { @@ -163,16 +138,6 @@ func (c *Config) Validate() error { } } - if c.CreateTestReport { - if _, err := os.Stat(c.ErigonDir); c.ErigonDir != "" && os.IsNotExist(err) { - return fmt.Errorf("erigon build dir not specified correctly: %s", c.ErigonDir) - } - - if _, err := os.Stat(c.SilkwormDir); c.SilkwormDir != "" && os.IsNotExist(err) { - return fmt.Errorf("silkworm build dir not specified correctly: %s", c.SilkwormDir) - } - } - return nil } @@ -250,16 +215,14 @@ type JSONReport struct { // PlatformInfo holds platform hardware and software information type PlatformInfo struct { - Vendor string `json:"vendor"` - Product string `json:"product"` - Board string `json:"board"` - CPU string `json:"cpu"` - Bogomips string `json:"bogomips"` - Kernel string `json:"kernel"` - GCCVersion string `json:"gccVersion"` - GoVersion string `json:"goVersion"` - SilkrpcCommit string `json:"silkrpcCommit"` - ErigonCommit string `json:"erigonCommit"` + Vendor string `json:"vendor"` + Product string `json:"product"` + Board string `json:"board"` + CPU string `json:"cpu"` + Bogomips string `json:"bogomips"` + Kernel string `json:"kernel"` + GCCVersion string `json:"gccVersion"` + GoVersion string `json:"goVersion"` } // ConfigurationInfo holds test configuration information @@ -603,14 +566,9 @@ func (pt *PerfTest) CopyAndExtractPatternFile() error { // Substitute address if not localhost if pt.config.RPCDaemonAddress != "localhost" { - silkwormPattern := VegetaPatternSilkwormBase + pt.config.TestType + ".txt" - erigonPattern := 
VegetaPatternErigonBase + pt.config.TestType + ".txt" + patternDir := VegetaPatternErigonBase + pt.config.TestType + ".txt" - if err := pt.replaceInFile(silkwormPattern, "localhost", pt.config.RPCDaemonAddress); err != nil { - log.Printf("Warning: failed to replace address in silkworm pattern: %v", err) - } - - if err := pt.replaceInFile(erigonPattern, "localhost", pt.config.RPCDaemonAddress); err != nil { + if err := pt.replaceInFile(patternDir, "localhost", pt.config.RPCDaemonAddress); err != nil { log.Printf("Warning: failed to replace address in erigon pattern: %v", err) } } @@ -733,11 +691,7 @@ func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, nam // Determine pattern file var pattern string - if name == Silkworm { - pattern = VegetaPatternSilkwormBase + pt.config.TestType + ".txt" - } else { - pattern = VegetaPatternErigonBase + pt.config.TestType + ".txt" - } + pattern = VegetaPatternErigonBase + pt.config.TestType + ".txt" // Create the binary file name timestamp := time.Now().Format("20060102150405") @@ -791,11 +745,7 @@ func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, nam // Check if the server is still alive during the test if pt.config.CheckServerAlive { var serverName string - if name == Silkworm { - serverName = SilkwormServerName - } else { - serverName = ErigonServerName - } + serverName = ErigonServerName if !IsProcessRunning(serverName) { fmt.Println("test failed: server is Dead") @@ -928,11 +878,7 @@ func (pt *PerfTest) ExecuteSequence(ctx context.Context, sequence []TestSequence // Get pattern to extract port information var pattern string - if tag == Silkworm { - pattern = VegetaPatternSilkwormBase + pt.config.TestType + ".txt" - } else { - pattern = VegetaPatternErigonBase + pt.config.TestType + ".txt" - } + pattern = VegetaPatternErigonBase + pt.config.TestType + ".txt" // Print port information if file, err := os.Open(pattern); err == nil { @@ -1173,24 +1119,16 @@ func (tr *TestReport) 
Open() error { cpuModel := tr.hardware.GetCPUModel() bogomips := tr.hardware.GetBogomips() - var silkrpcCommit, erigonCommit string - if tr.config.TestMode == "1" || tr.config.TestMode == "3" { - silkrpcCommit = GetGitCommit(tr.config.SilkwormDir) - } - if tr.config.TestMode == "2" || tr.config.TestMode == "3" { - erigonCommit = GetGitCommit(tr.config.ErigonDir) - } - // Write headers if err := tr.writeTestHeader(cpuModel, bogomips, kernelVersion, checksum, - gccVersion, goVersion, silkrpcCommit, erigonCommit); err != nil { + gccVersion, goVersion); err != nil { return fmt.Errorf("failed to write test header: %w", err) } // Initialise the JSON report if needed if tr.config.JSONReportFile != "" { tr.initializeJSONReport(cpuModel, bogomips, kernelVersion, checksum, - gccVersion, goVersion, silkrpcCommit, erigonCommit) + gccVersion, goVersion) } return nil @@ -1245,8 +1183,7 @@ func (tr *TestReport) createCSVFile() error { } // writeTestHeader writes the test configuration header to CSV -func (tr *TestReport) writeTestHeader(cpuModel, bogomips, kernelVersion, checksum, - gccVersion, goVersion, silkrpcCommit, erigonCommit string) error { +func (tr *TestReport) writeTestHeader(cpuModel, bogomips, kernelVersion, checksum, gccVersion, goVersion string) error { // Write platform information emptyRow := make([]string, 14) @@ -1301,14 +1238,6 @@ func (tr *TestReport) writeTestHeader(cpuModel, bogomips, kernelVersion, checksu if err != nil { return err } - err = tr.csvWriter.Write(append(emptyRow[:12], "silkrpcVersion", silkrpcCommit)) - if err != nil { - return err - } - err = tr.csvWriter.Write(append(emptyRow[:12], "erigonVersion", erigonCommit)) - if err != nil { - return err - } // Empty rows for range 2 { @@ -1334,20 +1263,18 @@ func (tr *TestReport) writeTestHeader(cpuModel, bogomips, kernelVersion, checksu // initializeJSONReport initializes the JSON report structure func (tr *TestReport) initializeJSONReport(cpuModel, bogomips, kernelVersion, checksum, - gccVersion, 
goVersion, silkrpcCommit, erigonCommit string) { + gccVersion, goVersion string) { tr.jsonReport = &JSONReport{ Platform: PlatformInfo{ - Vendor: strings.TrimSpace(tr.hardware.Vendor()), - Product: strings.TrimSpace(tr.hardware.Product()), - Board: strings.TrimSpace(tr.hardware.Board()), - CPU: strings.TrimSpace(cpuModel), - Bogomips: strings.TrimSpace(bogomips), - Kernel: strings.TrimSpace(kernelVersion), - GCCVersion: strings.TrimSpace(gccVersion), - GoVersion: strings.TrimSpace(goVersion), - SilkrpcCommit: strings.TrimSpace(silkrpcCommit), - ErigonCommit: strings.TrimSpace(erigonCommit), + Vendor: strings.TrimSpace(tr.hardware.Vendor()), + Product: strings.TrimSpace(tr.hardware.Product()), + Board: strings.TrimSpace(tr.hardware.Board()), + CPU: strings.TrimSpace(cpuModel), + Bogomips: strings.TrimSpace(bogomips), + Kernel: strings.TrimSpace(kernelVersion), + GCCVersion: strings.TrimSpace(gccVersion), + GoVersion: strings.TrimSpace(goVersion), }, Configuration: ConfigurationInfo{ TestingDaemon: tr.config.TestingDaemon, @@ -1630,12 +1557,6 @@ func main() { Value: DefaultTestType, Usage: "Test type (e.g., eth_call, eth_getLogs)", }, - &cli.StringFlag{ - Name: "test-mode", - Aliases: []string{"m"}, - Value: DefaultTestMode, - Usage: "Test mode: silkworm(1), erigon(2), both(3)", - }, &cli.StringFlag{ Name: "pattern-file", Aliases: []string{"p"}, @@ -1666,18 +1587,6 @@ func main() { Value: DefaultErigonAddress, Usage: "RPC daemon address (e.g., 192.2.3.1)", }, - &cli.StringFlag{ - Name: "erigon-dir", - Aliases: []string{"g"}, - Value: DefaultErigonBuildDir, - Usage: "Path to Erigon folder", - }, - &cli.StringFlag{ - Name: "silk-dir", - Aliases: []string{"s"}, - Value: DefaultSilkwormBuildDir, - Usage: "Path to Silkworm folder", - }, &cli.StringFlag{ Name: "run-vegeta-on-core", Aliases: []string{"c"}, @@ -1742,14 +1651,11 @@ func runPerfTests(c *cli.Context) error { config.TestingDaemon = c.String("testing-daemon") config.ChainName = c.String("blockchain") 
config.TestType = c.String("test-type") - config.TestMode = c.String("test-mode") config.VegetaPatternTarFile = c.String("pattern-file") config.Repetitions = c.Int("repetitions") config.TestSequence = c.String("test-sequence") config.WaitingTime = c.Int("wait-after-test-sequence") config.RPCDaemonAddress = c.String("rpc-daemon-address") - config.ErigonDir = c.String("erigon-dir") - config.SilkwormDir = c.String("silk-dir") config.DaemonVegetaOnCore = c.String("run-vegeta-on-core") config.VegetaResponseTimeout = c.String("response-timeout") config.MaxBodyRsp = c.String("max-body-rsp") @@ -1804,25 +1710,9 @@ func runPerfTests(c *cli.Context) error { // Create context ctx := context.Background() - // Run tests based on test mode - if config.TestMode == "1" || config.TestMode == "3" { - fmt.Println("Testing Silkworm...") - if err := perfTest.ExecuteSequence(ctx, sequence, Silkworm); err != nil { - fmt.Printf("Performance Test failed, error: %v\n", err) - return err - } - - if config.TestMode == "3" { - fmt.Println("--------------------------------------------------------------------------------------------") - } - } - - if config.TestMode == "2" || config.TestMode == "3" { - fmt.Println("Testing Erigon...") - if err := perfTest.ExecuteSequence(ctx, sequence, Erigon); err != nil { - fmt.Printf("Performance Test failed, error: %v\n", err) - return err - } + if err := perfTest.ExecuteSequence(ctx, sequence, Erigon); err != nil { + fmt.Printf("Performance Test failed, error: %v\n", err) + return err } fmt.Println("Performance Test completed successfully.") From 5fc7db202dc5ce7d8ec4f333bed4f1780aa97259 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Tue, 23 Dec 2025 09:53:11 +0100 Subject: [PATCH 42/87] remove reference as erigon --- cmd/perf/main.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/cmd/perf/main.go b/cmd/perf/main.go index 6fc16cc5..104ac3de 100644 --- 
a/cmd/perf/main.go +++ b/cmd/perf/main.go @@ -31,17 +31,15 @@ const ( DefaultRepetitions = 10 DefaultVegetaPatternTarFile = "" DefaultDaemonVegetaOnCore = "-:-" - DefaultErigonBuildDir = "" - DefaultErigonAddress = "localhost" + DefaultServerAddress = "localhost" DefaultWaitingTime = 5 DefaultMaxConn = "9000" DefaultTestType = "eth_getLogs" DefaultVegetaResponseTimeout = "300s" DefaultMaxBodyRsp = "1500" - Erigon = "rpcdaemon" + ServerName = "rpcdaemon" BinaryDir = "bin" - ErigonServerName = "rpcdaemon" ) var ( @@ -49,7 +47,7 @@ var ( VegetaPatternDirname string VegetaReport string VegetaTarFileName string - VegetaPatternErigonBase string + VegetaPatternBase string ) func init() { @@ -59,7 +57,7 @@ func init() { VegetaPatternDirname = RunTestDirname + "/erigon_stress_test" VegetaReport = RunTestDirname + "/vegeta_report.hrd" VegetaTarFileName = RunTestDirname + "/vegeta_TAR_File" - VegetaPatternErigonBase = VegetaPatternDirname + "/vegeta_erigon_" + VegetaPatternBase = VegetaPatternDirname + "/vegeta_erigon_" } // Config holds all configuration for the performance test @@ -98,7 +96,7 @@ func NewConfig() *Config { DaemonVegetaOnCore: DefaultDaemonVegetaOnCore, Repetitions: DefaultRepetitions, TestSequence: DefaultTestSequence, - RPCDaemonAddress: DefaultErigonAddress, + RPCDaemonAddress: DefaultServerAddress, TestType: DefaultTestType, TestingDaemon: "", WaitingTime: DefaultWaitingTime, @@ -566,10 +564,10 @@ func (pt *PerfTest) CopyAndExtractPatternFile() error { // Substitute address if not localhost if pt.config.RPCDaemonAddress != "localhost" { - patternDir := VegetaPatternErigonBase + pt.config.TestType + ".txt" + patternDir := VegetaPatternBase + pt.config.TestType + ".txt" if err := pt.replaceInFile(patternDir, "localhost", pt.config.RPCDaemonAddress); err != nil { - log.Printf("Warning: failed to replace address in erigon pattern: %v", err) + log.Printf("Warning: failed to replace address in pattern: %v", err) } } @@ -691,7 +689,7 @@ func (pt *PerfTest) 
Execute(ctx context.Context, testNumber, repetition int, nam // Determine pattern file var pattern string - pattern = VegetaPatternErigonBase + pt.config.TestType + ".txt" + pattern = VegetaPatternBase + pt.config.TestType + ".txt" // Create the binary file name timestamp := time.Now().Format("20060102150405") @@ -745,7 +743,7 @@ func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, nam // Check if the server is still alive during the test if pt.config.CheckServerAlive { var serverName string - serverName = ErigonServerName + serverName = ServerName if !IsProcessRunning(serverName) { fmt.Println("test failed: server is Dead") @@ -878,7 +876,7 @@ func (pt *PerfTest) ExecuteSequence(ctx context.Context, sequence []TestSequence // Get pattern to extract port information var pattern string - pattern = VegetaPatternErigonBase + pt.config.TestType + ".txt" + pattern = VegetaPatternBase + pt.config.TestType + ".txt" // Print port information if file, err := os.Open(pattern); err == nil { @@ -1127,6 +1125,7 @@ func (tr *TestReport) Open() error { // Initialise the JSON report if needed if tr.config.JSONReportFile != "" { + fmt.Println ("init gen report file") tr.initializeJSONReport(cpuModel, bogomips, kernelVersion, checksum, gccVersion, goVersion) } @@ -1316,6 +1315,7 @@ func (tr *TestReport) WriteTestReport(metrics *TestMetrics) error { // Write to JSON if enabled if tr.config.JSONReportFile != "" { + fmt.Println ("write Test Report") if err := tr.writeTestReportToJSON(metrics); err != nil { return fmt.Errorf("failed to write JSON report: %w", err) } @@ -1584,7 +1584,7 @@ func main() { &cli.StringFlag{ Name: "rpc-daemon-address", Aliases: []string{"d"}, - Value: DefaultErigonAddress, + Value: DefaultServerAddress, Usage: "RPC daemon address (e.g., 192.2.3.1)", }, &cli.StringFlag{ @@ -1710,7 +1710,7 @@ func runPerfTests(c *cli.Context) error { // Create context ctx := context.Background() - if err := perfTest.ExecuteSequence(ctx, sequence, Erigon); 
err != nil { + if err := perfTest.ExecuteSequence(ctx, sequence, ServerName); err != nil { fmt.Printf("Performance Test failed, error: %v\n", err) return err } From ce1f82b566269f43d89674370dbb65f78ae06dff Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Tue, 23 Dec 2025 10:44:45 +0100 Subject: [PATCH 43/87] avoid reading response into buffer move decoding and validating indented encoding --- cmd/integration/main.go | 262 ++++++++++++++++++++++------------------ 1 file changed, 142 insertions(+), 120 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index e732508a..02b3d714 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -18,6 +18,7 @@ import ( "os/exec" "os/signal" "path/filepath" + "reflect" "regexp" "runtime" "runtime/pprof" @@ -365,6 +366,7 @@ type TestMetrics struct { MarshallingTime time.Duration UnmarshallingTime time.Duration ComparisonCount int + EqualCount int } type TestOutcome struct { @@ -403,7 +405,7 @@ type JsonRpcTest struct { type JsonRpcCommand struct { Request jsoniter.RawMessage `json:"request"` - Response jsoniter.RawMessage `json:"response"` + Response any `json:"response"` TestInfo *JsonRpcTest `json:"test"` } @@ -881,7 +883,7 @@ func apiUnderTest(currAPI, testName string, config *Config) bool { return false } -func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response, expectedResponse []byte) error { +func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response, expectedResponse any, metrics *TestMetrics) error { if !dumpJSON { return nil } @@ -891,13 +893,25 @@ func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response } if daemonFile != "" { - if err := os.WriteFile(daemonFile, response, 0644); err != nil { + start := time.Now() + responseData, err := json.MarshalIndent(response, "", " ") + if err != nil { + return err + } + metrics.MarshallingTime += time.Since(start) + if err := 
os.WriteFile(daemonFile, responseData, 0644); err != nil { return fmt.Errorf("Exception on file write daemon: %v\n", err) } } if expRspFile != "" { - if err := os.WriteFile(expRspFile, expectedResponse, 0644); err != nil { + start := time.Now() + expectedResponseData, err := json.MarshalIndent(expectedResponse, "", " ") + if err != nil { + return err + } + metrics.MarshallingTime += time.Since(start) + if err := os.WriteFile(expRspFile, expectedResponseData, 0644); err != nil { return fmt.Errorf("Exception on file write expected: %v\n", err) } } @@ -969,8 +983,13 @@ func validateJsonRpcResponseObject(response map[string]any, strict bool) error { // This implies that the response must be either a valid JSON-RPC object, i.e. a JSON object containing at least // "jsonrpc" and "id" fields or a JSON array where each element (if any) is in turn a valid JSON-RPC object. func validateJsonRpcResponse(response any) error { - _, isArray := response.([]any) - responseAsMap, isMap := response.(map[string]any) + value := reflect.ValueOf(response) + if value.Kind() == reflect.Ptr { + value = value.Elem() + } + unwrappedResponse := value.Interface() + responseAsArray, isArray := (unwrappedResponse).([]any) + responseAsMap, isMap := (unwrappedResponse).(map[string]any) if !isArray && !isMap { return errJsonRpcUnexpectedFormat } @@ -982,7 +1001,7 @@ func validateJsonRpcResponse(response any) error { } } if isArray { - for _, element := range response.([]any) { + for _, element := range responseAsArray { elementAsMap, isElementMap := element.(map[string]any) if !isElementMap { return errJsonRpcUnexpectedFormat @@ -996,7 +1015,7 @@ func validateJsonRpcResponse(response any) error { return nil } -func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtAuth, target string, request []byte, metrics *TestMetrics) ([]byte, error) { +func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtAuth, target string, request []byte, response any, metrics 
*TestMetrics) error { headers := map[string]string{ "Content-Type": "application/json", } @@ -1025,7 +1044,7 @@ func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtA if config.VerboseLevel > 0 { fmt.Printf("\nhttp request creation fail: %s %v\n", targetURL, err) } - return nil, err + return err } for k, v := range headers { @@ -1043,7 +1062,7 @@ func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtA if config.VerboseLevel > 0 { fmt.Printf("\nhttp connection fail: %s %v\n", targetURL, err) } - return nil, err + return err } defer func(Body io.ReadCloser) { err := Body.Close() @@ -1056,70 +1075,64 @@ func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtA if config.VerboseLevel > 1 { fmt.Printf("\npost result status_code: %d\n", resp.StatusCode) } - return nil, fmt.Errorf("http status %v", resp.Status) + return fmt.Errorf("http status %v", resp.Status) } - body, err := io.ReadAll(resp.Body) - if err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nfailed to read response body: %v\n", err) - } - return nil, err + start = time.Now() + if err = json.NewDecoder(resp.Body).Decode(response); err != nil { + return fmt.Errorf("cannot decode http body as json %w", err) } - - if config.VerboseLevel > 1 { - fmt.Printf("\nhttp response body: %s\n", string(body)) + metrics.UnmarshallingTime += time.Since(start) + if err = validateJsonRpcResponse(response); err != nil { // TODO: improve or remove (casts as well) + return fmt.Errorf("json response in invalid: %w", err) } if config.VerboseLevel > 1 { - fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, request, string(body)) + raw, _ := json.Marshal(response) + fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, request, string(raw)) } - return body, nil + return nil } -type RPCRequest struct { +type JsonRpcRequest struct { Jsonrpc string `json:"jsonrpc"` Method string `json:"method"` Params []interface{} `json:"params"` - ID int 
`json:"id"` + Id int `json:"id"` } -type RPCResponse struct { +type JsonRpcResponse struct { Result string `json:"result"` Error *struct { Message string `json:"message"` } `json:"error"` } -func getBlockNumber(ctx context.Context, config *Config, url string, metrics *TestMetrics) (uint64, error) { - payload := RPCRequest{ +func getLatestBlockNumber(ctx context.Context, config *Config, url string, metrics *TestMetrics) (uint64, error) { + request := JsonRpcRequest{ Jsonrpc: "2.0", Method: "eth_blockNumber", Params: []interface{}{}, - ID: 1, + Id: 1, } - requestBytes, _ := json.Marshal(payload) + requestBytes, _ := json.Marshal(request) - responseBytes, err := executeHttpRequest(ctx, config, "http", "", url, requestBytes, metrics) + var response JsonRpcResponse + err := executeHttpRequest(ctx, config, "http", "", url, requestBytes, response, metrics) if err != nil { return 0, err } - var rpcResp RPCResponse - if err := json.Unmarshal(responseBytes, &rpcResp); err != nil { - return 0, fmt.Errorf("error decoding json: %w", err) - } - - if rpcResp.Error != nil { - return 0, fmt.Errorf("RPC error: %s", rpcResp.Error.Message) + if response.Error != nil { + return 0, fmt.Errorf("RPC error: %s", response.Error.Message) } - cleanHex := strings.TrimPrefix(rpcResp.Result, "0x") - return strconv.ParseUint(cleanHex, 16, 64) + result := strings.TrimPrefix(response.Result, "0x") + return strconv.ParseUint(result, 16, 64) } -func GetConsistentBlockNumber(config *Config, server1URL, server2URL string, maxRetries int, retryDelayMs int) *uint64 { +func getConsistentLatestBlock(config *Config, server1URL, server2URL string, maxRetries int, retryDelayMs int) (uint64, error) { var bn1, bn2 uint64 delay := time.Duration(retryDelayMs) * time.Millisecond @@ -1128,13 +1141,16 @@ func GetConsistentBlockNumber(config *Config, server1URL, server2URL string, max ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) var err1, err2 error - bn1, err1 = getBlockNumber(ctx, 
config, server1URL, &metrics) - bn2, err2 = getBlockNumber(ctx, config, server2URL, &metrics) + bn1, err1 = getLatestBlockNumber(ctx, config, server1URL, &metrics) + bn2, err2 = getLatestBlockNumber(ctx, config, server2URL, &metrics) cancel() + if config.VerboseLevel > 1 { + fmt.Printf("retry: %d nodes: %s, %s latest blocks: %d, %d\n", i+1, server1URL, server2URL, bn1, bn2) + } + if err1 == nil && err2 == nil && bn1 == bn2 { - fmt.Printf("INFO: Nodi sincronizzati (Tentativo %d): %d\n", i+1, bn1) - return &bn1 + return bn1, nil } if i < maxRetries-1 { @@ -1142,11 +1158,10 @@ func GetConsistentBlockNumber(config *Config, server1URL, server2URL string, max } } - fmt.Printf("ERROR: Nodi non sincronizzati o errori di rete. Ultimi valori: %d / %d\n", bn1, bn2) - return nil + return 0, fmt.Errorf("nodes not synced, last values: %d / %d", bn1, bn2) } -func executeWebSocketRequest(config *Config, transportType, jwtAuth, target string, request []byte, metrics *TestMetrics) ([]byte, error) { +func executeWebSocketRequest(config *Config, transportType, jwtAuth, target string, request []byte, response any, metrics *TestMetrics) error { wsTarget := "ws://" + target dialer := websocket.Dialer{ HandshakeTimeout: 300 * time.Second, @@ -1163,7 +1178,7 @@ func executeWebSocketRequest(config *Config, transportType, jwtAuth, target stri if config.VerboseLevel > 0 { fmt.Printf("\nwebsocket connection fail: %v\n", err) } - return nil, err + return err } defer func(conn *websocket.Conn) { err := conn.Close() @@ -1177,30 +1192,40 @@ func executeWebSocketRequest(config *Config, transportType, jwtAuth, target stri if config.VerboseLevel > 0 { fmt.Printf("\nwebsocket write fail: %v\n", err) } - return nil, err + return err } - _, message, err := conn.ReadMessage() + _, message, err := conn.NextReader() if err != nil { if config.VerboseLevel > 0 { fmt.Printf("\nwebsocket read fail: %v\n", err) } - return nil, err + return err } metrics.RoundTripTime = time.Since(start) + start = time.Now() + 
if err = json.NewDecoder(message).Decode(&response); err != nil { + return fmt.Errorf("cannot decode websocket message as json %w", err) + } + metrics.UnmarshallingTime += time.Since(start) + if err = validateJsonRpcResponse(response); err != nil { // TODO: improve or remove (casts as well) + return fmt.Errorf("json response in invalid %w", err) + } + if config.VerboseLevel > 1 { - fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, request, string(message)) + raw, _ := json.Marshal(response) + fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, request, string(raw)) } - return message, nil + return nil } -func executeRequest(ctx context.Context, config *Config, transportType, jwtAuth, target string, request []byte, metrics *TestMetrics) ([]byte, error) { +func executeRequest(ctx context.Context, config *Config, transportType, jwtAuth, target string, request []byte, response any, metrics *TestMetrics) error { if strings.HasPrefix(transportType, "http") { - return executeHttpRequest(ctx, config, transportType, jwtAuth, target, request, metrics) + return executeHttpRequest(ctx, config, transportType, jwtAuth, target, request, response, metrics) } - return executeWebSocketRequest(config, transportType, jwtAuth, target, request, metrics) + return executeWebSocketRequest(config, transportType, jwtAuth, target, request, response, metrics) } func runCompare(jsonDiff bool, errorFile, tempFile1, tempFile2, diffFile string) bool { @@ -1431,8 +1456,8 @@ func (c *JsonRpcCommand) compareJSON(config *Config, daemonFile, expRspFile, dif return true, nil } -func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byte, config *Config, outputDir, daemonFile, expRspFile, diffFile string, outcome *TestOutcome) { - var expectedResponse []byte +func (c *JsonRpcCommand) processResponse(response, result1, responseInFile any, config *Config, outputDir, daemonFile, expRspFile, diffFile string, outcome *TestOutcome) { + var expectedResponse any if result1 
!= nil { expectedResponse = result1 } else { @@ -1440,7 +1465,7 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt } if config.WithoutCompareResults { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) if err != nil { outcome.Error = err return @@ -1449,48 +1474,46 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt return } - var responseMap map[string]interface{} - var respIsMap bool - start := time.Now() - if err := json.NewDecoder(bytes.NewReader(response)).Decode(&responseMap); err == nil { - outcome.Metrics.UnmarshallingTime += time.Since(start) - respIsMap = true - start = time.Now() - response, err = json.MarshalIndent(responseMap, "", " ") - if err != nil { - outcome.Error = err - return + mapsEqual := func(lhs, rhs map[string]interface{}) bool { + if len(lhs) != len(rhs) { + return false } - outcome.Metrics.MarshallingTime += time.Since(start) - err = validateJsonRpcResponse(responseMap) - if err != nil { - outcome.Error = err - return + for k, lv := range lhs { + rv, ok := rhs[k] + if !ok || !reflect.DeepEqual(lv, rv) { + return false + } } + return true } - var expectedMap map[string]interface{} - var expIsMap bool - start = time.Now() - if err := json.NewDecoder(bytes.NewReader(expectedResponse)).Decode(&expectedMap); err == nil { - outcome.Metrics.UnmarshallingTime += time.Since(start) - expIsMap = true - start := time.Now() - expectedResponse, err = json.MarshalIndent(expectedMap, "", " ") - if err != nil { - outcome.Error = err - return + arrayEqual := func(lhs, rhs []map[string]interface{}) bool { + if len(lhs) != len(rhs) { + return false } - outcome.Metrics.MarshallingTime += time.Since(start) - err = validateJsonRpcResponse(expectedMap) - if err != nil { - outcome.Error = err - return + for i := 0; i < 
len(lhs); i++ { + if !mapsEqual(lhs[i], rhs[i]) { + return false + } } + return true } - - // Fast path: if actual/expected are identical byte-wise, no need to compare them - if bytes.Equal(response, expectedResponse) { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + compareResponses := func(lhs, rhs any) bool { + leftMap, leftIsMap := lhs.(map[string]interface{}) + rightMap, rightIsMap := rhs.(map[string]interface{}) + if leftIsMap && rightIsMap { + return mapsEqual(leftMap, rightMap) + } + leftArray, leftIsArray := lhs.([]map[string]interface{}) + rightArray, rightIsArray := rhs.([]map[string]interface{}) + if leftIsArray && rightIsArray { + return arrayEqual(leftArray, rightArray) + } + return reflect.DeepEqual(lhs, rhs) + } + // Fast path: if actual/expected are identical, no need to compare them + if compareResponses(response, expectedResponse) { + outcome.Metrics.EqualCount += 1 + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) if err != nil { outcome.Error = err return @@ -1500,13 +1523,15 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt } // Check various conditions where we don't care about differences - if respIsMap && expIsMap { // TODO: extract function ignoreDifferences and handle JSON batch responses + responseMap, respIsMap := response.(map[string]interface{}) // TODO: remove redundant casts + expectedMap, expIsMap := expectedResponse.(map[string]interface{}) // TODO: remove redundant casts + if respIsMap && expIsMap { // TODO: extract function ignoreDifferences and handle JSON batch responses _, responseHasResult := responseMap["result"] expectedResult, expectedHasResult := expectedMap["result"] _, responseHasError := responseMap["error"] expectedError, expectedHasError := expectedMap["error"] if responseHasResult && expectedHasResult && expectedResult == nil && result1 == nil { - err 
:= dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) if err != nil { outcome.Error = err return @@ -1515,7 +1540,7 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt return } if responseHasError && expectedHasError && expectedError == nil { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) if err != nil { outcome.Error = err return @@ -1525,7 +1550,7 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt } // TODO: improve len(expectedMap) == 2 which means: just "jsonrpc" and "id" are expected if !expectedHasResult && !expectedHasError && len(expectedMap) == 2 { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) if err != nil { outcome.Error = err return @@ -1534,7 +1559,7 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt return } if responseHasError && expectedHasError && config.DoNotCompareError { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse) + err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) if err != nil { outcome.Error = err return @@ -1545,7 +1570,7 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile []byt } // We need to compare the response and expectedResponse, so we dump them to files first - err := dumpJSONs(true, daemonFile, expRspFile, outputDir, response, expectedResponse) + err := 
dumpJSONs(true, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) if err != nil { outcome.Error = err return @@ -1600,7 +1625,8 @@ func (c *JsonRpcCommand) run(ctx context.Context, config *Config, descriptor *Te diffFile := outputAPIFilename + "-diff.json" if !config.VerifyWithDaemon { - result, err := executeRequest(ctx, config, transportType, jwtAuth, target, request, &outcome.Metrics) + var result any + err := executeRequest(ctx, config, transportType, jwtAuth, target, request, &result, &outcome.Metrics) if err != nil { outcome.Error = err return @@ -1608,10 +1634,6 @@ func (c *JsonRpcCommand) run(ctx context.Context, config *Config, descriptor *Te if config.VerboseLevel > 2 { fmt.Printf("%s: [%v]\n", config.DaemonUnderTest, result) } - if result == nil { - outcome.Error = errors.New("response is n il (maybe node at " + target + " is down?)") - return - } responseInFile := c.Response daemonFile := outputAPIFilename + "-response.json" @@ -1620,7 +1642,8 @@ func (c *JsonRpcCommand) run(ctx context.Context, config *Config, descriptor *Te c.processResponse(result, nil, responseInFile, config, outputDirName, daemonFile, expRspFile, diffFile, outcome) } else { target = getTarget(DaemonOnDefaultPort, descriptor.Name, config) - result, err := executeRequest(ctx, config, transportType, jwtAuth, target, request, &outcome.Metrics) + var result any + err := executeRequest(ctx, config, transportType, jwtAuth, target, request, &result, &outcome.Metrics) if err != nil { outcome.Error = err return @@ -1628,12 +1651,10 @@ func (c *JsonRpcCommand) run(ctx context.Context, config *Config, descriptor *Te if config.VerboseLevel > 2 { fmt.Printf("%s: [%v]\n", config.DaemonUnderTest, result) } - if result == nil { - outcome.Error = errors.New("response is nil (maybe node at " + target + " is down?)") - return - } + target1 = getTarget(config.DaemonAsReference, descriptor.Name, config) - result1, err := executeRequest(ctx, config, transportType, jwtAuth, 
target1, request, &outcome.Metrics) + var result1 any + err = executeRequest(ctx, config, transportType, jwtAuth, target1, request, &result1, &outcome.Metrics) if err != nil { outcome.Error = err return @@ -1641,10 +1662,6 @@ func (c *JsonRpcCommand) run(ctx context.Context, config *Config, descriptor *Te if config.VerboseLevel > 2 { fmt.Printf("%s: [%v]\n", config.DaemonAsReference, result1) } - if result1 == nil { - outcome.Error = errors.New("response is nil (maybe node at " + target1 + " is down?)") - return - } daemonFile := outputAPIFilename + getJSONFilenameExt(DaemonOnDefaultPort, target) expRspFile := outputAPIFilename + getJSONFilenameExt(config.DaemonAsReference, target1) @@ -1704,6 +1721,7 @@ type ResultCollector struct { totalMarshallingTime time.Duration totalUnmarshallingTime time.Duration totalComparisonCount int + totalEqualCount int } func newResultCollector(resultsChan chan chan TestResult, config *Config) *ResultCollector { @@ -1736,6 +1754,7 @@ func (c *ResultCollector) start(ctx context.Context, cancelCtx context.CancelFun c.totalMarshallingTime += result.Outcome.Metrics.MarshallingTime c.totalUnmarshallingTime += result.Outcome.Metrics.UnmarshallingTime c.totalComparisonCount += result.Outcome.Metrics.ComparisonCount + c.totalEqualCount += result.Outcome.Metrics.EqualCount } else { c.failedTests++ fmt.Printf("failed: %s\n", result.Outcome.Error.Error()) @@ -1867,10 +1886,12 @@ func runMain() int { var server1 = fmt.Sprintf("%s:%d", config.DaemonOnHost, config.ServerPort) var maxRetries = 10 var retryDelayMs = 1000 - var consistent_block = GetConsistentBlockNumber(config, server1, config.ExternalProviderURL, maxRetries, retryDelayMs) - if consistent_block == nil { - fmt.Printf("ERROR: Tests on latest block: two servers are not synchronized") - return 1 + latestBlock, err := getConsistentLatestBlock(config, server1, config.ExternalProviderURL, maxRetries, retryDelayMs) + if err != nil { + return -1 // TODO: unique return codes? 
+ } + if config.VerboseLevel > 0 { + fmt.Printf("Latest block number for %s, %s: %d\n", server1, config.ExternalProviderURL, latestBlock) } } @@ -2134,6 +2155,7 @@ func runMain() int { fmt.Printf("Total Marshalling time: %v\n", resultsCollector.totalMarshallingTime) fmt.Printf("Total Unmarshalling time: %v\n", resultsCollector.totalUnmarshallingTime) fmt.Printf("Total Comparison count: %v\n", resultsCollector.totalComparisonCount) + fmt.Printf("Total Equal count: %v\n", resultsCollector.totalEqualCount) fmt.Printf("Test session duration: %v\n", elapsed) fmt.Printf("Test session iterations: %d\n", testRep) fmt.Printf("Test suite total APIs: %d\n", availableTestedAPIs) From 19e7c19bb2401e1b24dc411da698122748376f00 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Tue, 23 Dec 2025 11:22:27 +0100 Subject: [PATCH 44/87] fix indentation using json-iterator --- cmd/integration/main.go | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 02b3d714..e91a47ad 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -36,8 +36,6 @@ import ( jsoniter "github.com/json-iterator/go" ) -var json = jsoniter.ConfigCompatibleWithStandardLibrary - const ( DaemonOnOtherPort = "other-daemon" DaemonOnDefaultPort = "rpcdaemon" @@ -289,7 +287,7 @@ func extractArchive(archivePath string, sanitizeExtension bool, metrics *TestMet } start := time.Now() - if err := json.NewDecoder(tarReader).Decode(&jsonrpcCommands); err != nil { + if err := jsoniter.NewDecoder(tarReader).Decode(&jsonrpcCommands); err != nil { return jsonrpcCommands, errors.New("cannot parse JSON " + archivePath + ": " + err.Error()) } metrics.UnmarshallingTime += time.Since(start) @@ -894,7 +892,7 @@ func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response if daemonFile != "" { start := time.Now() - responseData, err := json.MarshalIndent(response, "", " ") + 
responseData, err := jsoniter.MarshalIndent(response, "", " ") if err != nil { return err } @@ -906,7 +904,7 @@ func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response if expRspFile != "" { start := time.Now() - expectedResponseData, err := json.MarshalIndent(expectedResponse, "", " ") + expectedResponseData, err := jsoniter.MarshalIndent(expectedResponse, "", " ") if err != nil { return err } @@ -1079,7 +1077,7 @@ func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtA } start = time.Now() - if err = json.NewDecoder(resp.Body).Decode(response); err != nil { + if err = jsoniter.NewDecoder(resp.Body).Decode(response); err != nil { return fmt.Errorf("cannot decode http body as json %w", err) } metrics.UnmarshallingTime += time.Since(start) @@ -1088,7 +1086,7 @@ func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtA } if config.VerboseLevel > 1 { - raw, _ := json.Marshal(response) + raw, _ := jsoniter.Marshal(response) fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, request, string(raw)) } @@ -1116,7 +1114,7 @@ func getLatestBlockNumber(ctx context.Context, config *Config, url string, metri Params: []interface{}{}, Id: 1, } - requestBytes, _ := json.Marshal(request) + requestBytes, _ := jsoniter.Marshal(request) var response JsonRpcResponse err := executeHttpRequest(ctx, config, "http", "", url, requestBytes, response, metrics) @@ -1205,7 +1203,7 @@ func executeWebSocketRequest(config *Config, transportType, jwtAuth, target stri metrics.RoundTripTime = time.Since(start) start = time.Now() - if err = json.NewDecoder(message).Decode(&response); err != nil { + if err = jsoniter.NewDecoder(message).Decode(&response); err != nil { return fmt.Errorf("cannot decode websocket message as json %w", err) } metrics.UnmarshallingTime += time.Since(start) @@ -1214,7 +1212,7 @@ func executeWebSocketRequest(config *Config, transportType, jwtAuth, target stri } if config.VerboseLevel > 1 { - raw, _ 
:= json.Marshal(response) + raw, _ := jsoniter.Marshal(response) fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, request, string(raw)) } @@ -1341,7 +1339,7 @@ func extractJsonCommands(jsonFilename string, metrics *TestMetrics) ([]JsonRpcCo var jsonrpcCommands []JsonRpcCommand start := time.Now() - if err := json.NewDecoder(reader).Decode(&jsonrpcCommands); err != nil { + if err := jsoniter.NewDecoder(reader).Decode(&jsonrpcCommands); err != nil { return nil, fmt.Errorf("cannot parse JSON %s: %w", jsonFilename, err) } metrics.UnmarshallingTime += time.Since(start) @@ -2066,12 +2064,6 @@ func runMain() int { shouldRun := false if config.TestingAPIsWith == "" && config.TestingAPIs == "" && (config.ReqTestNumber == -1 || config.ReqTestNumber == testNumberInAnyLoop) { shouldRun = true - /*if slices.Contains([]int{29, 37, 133, 173, 1008, 1272, 1274}, testNumberInAnyLoop) { - file := fmt.Sprintf("%-60s", jsonTestFullName) - tt := fmt.Sprintf("%-15s", transportType) - fmt.Printf("%04d. 
%s::%s skipped as long-running\n", testNumberInAnyLoop, tt, file) - shouldRun = false - }*/ } else if config.TestingAPIsWith != "" && checkTestNameForNumber(testName, config.ReqTestNumber) { shouldRun = true } else if config.TestingAPIs != "" && checkTestNameForNumber(testName, config.ReqTestNumber) { From c5f71055069351601477475b97fb5a21ede27abc Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Tue, 23 Dec 2025 11:34:46 +0100 Subject: [PATCH 45/87] refactoring retryDelay as Duration --- cmd/integration/main.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index e91a47ad..5fec668c 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1130,9 +1130,8 @@ func getLatestBlockNumber(ctx context.Context, config *Config, url string, metri return strconv.ParseUint(result, 16, 64) } -func getConsistentLatestBlock(config *Config, server1URL, server2URL string, maxRetries int, retryDelayMs int) (uint64, error) { +func getConsistentLatestBlock(config *Config, server1URL, server2URL string, maxRetries int, retryDelay time.Duration) (uint64, error) { var bn1, bn2 uint64 - delay := time.Duration(retryDelayMs) * time.Millisecond metrics := TestMetrics{} for i := 0; i < maxRetries; i++ { @@ -1152,7 +1151,7 @@ func getConsistentLatestBlock(config *Config, server1URL, server2URL string, max } if i < maxRetries-1 { - time.Sleep(delay) + time.Sleep(retryDelay) } } @@ -1883,8 +1882,8 @@ func runMain() int { if config.VerifyWithDaemon && config.TestsOnLatestBlock { var server1 = fmt.Sprintf("%s:%d", config.DaemonOnHost, config.ServerPort) var maxRetries = 10 - var retryDelayMs = 1000 - latestBlock, err := getConsistentLatestBlock(config, server1, config.ExternalProviderURL, maxRetries, retryDelayMs) + var retryDelay = 1 * time.Second + latestBlock, err := getConsistentLatestBlock(config, server1, config.ExternalProviderURL, maxRetries, retryDelay) if err 
!= nil { return -1 // TODO: unique return codes? } From b86c9d6f0171a8f3daf20152d4079a0f2f5c97e6 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Wed, 24 Dec 2025 11:00:23 +0100 Subject: [PATCH 46/87] few fix rename reference to Daemon to Client --- cmd/perf/main.go | 112 ++++++++++++++++++++++------------------------- 1 file changed, 52 insertions(+), 60 deletions(-) diff --git a/cmd/perf/main.go b/cmd/perf/main.go index 104ac3de..08843858 100644 --- a/cmd/perf/main.go +++ b/cmd/perf/main.go @@ -30,24 +30,24 @@ const ( DefaultTestSequence = "50:30,1000:30,2500:20,10000:20" DefaultRepetitions = 10 DefaultVegetaPatternTarFile = "" - DefaultDaemonVegetaOnCore = "-:-" + DefaultClientVegetaOnCore = "-:-" DefaultServerAddress = "localhost" DefaultWaitingTime = 5 DefaultMaxConn = "9000" DefaultTestType = "eth_getLogs" DefaultVegetaResponseTimeout = "300s" DefaultMaxBodyRsp = "1500" + DefaultClientName = "rpcdaemon" - ServerName = "rpcdaemon" - BinaryDir = "bin" + BinaryDir = "bin" ) var ( - RunTestDirname string - VegetaPatternDirname string - VegetaReport string - VegetaTarFileName string - VegetaPatternBase string + RunTestDirname string + VegetaPatternDirname string + VegetaReport string + VegetaTarFileName string + VegetaPatternBase string ) func init() { @@ -57,18 +57,18 @@ func init() { VegetaPatternDirname = RunTestDirname + "/erigon_stress_test" VegetaReport = RunTestDirname + "/vegeta_report.hrd" VegetaTarFileName = RunTestDirname + "/vegeta_TAR_File" - VegetaPatternBase = VegetaPatternDirname + "/vegeta_erigon_" + VegetaPatternBase = VegetaPatternDirname + "/vegeta_erigon_" } // Config holds all configuration for the performance test type Config struct { VegetaPatternTarFile string - DaemonVegetaOnCore string + ClientVegetaOnCore string Repetitions int TestSequence string - RPCDaemonAddress string + ClientAddress string TestType string - TestingDaemon string + TestingClient string WaitingTime int VersionedTestReport 
bool Verbose bool @@ -93,12 +93,12 @@ type Config struct { func NewConfig() *Config { return &Config{ VegetaPatternTarFile: DefaultVegetaPatternTarFile, - DaemonVegetaOnCore: DefaultDaemonVegetaOnCore, + ClientVegetaOnCore: DefaultClientVegetaOnCore, Repetitions: DefaultRepetitions, TestSequence: DefaultTestSequence, - RPCDaemonAddress: DefaultServerAddress, + ClientAddress: DefaultServerAddress, TestType: DefaultTestType, - TestingDaemon: "", + TestingClient: DefaultClientName, WaitingTime: DefaultWaitingTime, VersionedTestReport: false, Verbose: false, @@ -122,8 +122,8 @@ func NewConfig() *Config { // Validate checks the configuration for conflicts and invalid values func (c *Config) Validate() error { - if c.JSONReportFile != "" && c.TestingDaemon == "" { - return fmt.Errorf("with json-report must also set testing-daemon") + if c.JSONReportFile != "" && c.TestingClient == "" { + return fmt.Errorf("with json-report must also set testing-client") } if c.EmptyCache { @@ -187,7 +187,7 @@ type VegetaTarget struct { // TestMetrics holds the results of a performance test type TestMetrics struct { - DaemonName string + ClientName string TestNumber int Repetition int QPS int @@ -225,7 +225,7 @@ type PlatformInfo struct { // ConfigurationInfo holds test configuration information type ConfigurationInfo struct { - TestingDaemon string `json:"testingDaemon"` + TestingClient string `json:"testingClient"` TestingAPI string `json:"testingApi"` TestSequence string `json:"testSequence"` TestRepetitions int `json:"testRepetitions"` @@ -416,9 +416,10 @@ func GetFileChecksum(filepath string) string { // IsProcessRunning checks if a process with the given name is running func IsProcessRunning(processName string) bool { - cmd := exec.Command("pgrep", "-f", processName) - err := cmd.Run() - return err == nil + cmd := exec.Command("pgrep", "-x", processName) + out, err := cmd.Output() + + return err == nil && len(out) > 0 } // EmptyCache drops OS caches @@ -563,10 +564,10 @@ func (pt 
*PerfTest) CopyAndExtractPatternFile() error { } // Substitute address if not localhost - if pt.config.RPCDaemonAddress != "localhost" { - patternDir := VegetaPatternBase + pt.config.TestType + ".txt" + if pt.config.ClientAddress != "localhost" { + patternDir := VegetaPatternBase + pt.config.TestType + ".txt" - if err := pt.replaceInFile(patternDir, "localhost", pt.config.RPCDaemonAddress); err != nil { + if err := pt.replaceInFile(patternDir, "localhost", pt.config.ClientAddress); err != nil { log.Printf("Warning: failed to replace address in pattern: %v", err) } } @@ -689,14 +690,14 @@ func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, nam // Determine pattern file var pattern string - pattern = VegetaPatternBase + pt.config.TestType + ".txt" + pattern = VegetaPatternBase + pt.config.TestType + ".txt" // Create the binary file name timestamp := time.Now().Format("20060102150405") pt.config.BinaryFile = fmt.Sprintf("%s_%s_%s_%s_%d_%d_%d.bin", timestamp, pt.config.ChainName, - pt.config.TestingDaemon, + pt.config.TestingClient, pt.config.TestType, qps, duration, @@ -705,7 +706,7 @@ func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, nam // Create the binary directory var dirname string if pt.config.VersionedTestReport { - dirname = "./reports/" + BinaryDir + "/" + dirname = "./perf/reports/" + BinaryDir + "/" } else { dirname = RunTestDirname + "/" + BinaryDir + "/" } @@ -720,13 +721,8 @@ func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, nam maxRepetitionDigits := strconv.Itoa(format.maxRepetitionDigits) maxQpsDigits := strconv.Itoa(format.maxQpsDigits) maxDurationDigits := strconv.Itoa(format.maxDurationDigits) - if pt.config.TestingDaemon != "" { - fmt.Printf("[%d.%"+maxRepetitionDigits+"d] %s: executes test qps: %"+maxQpsDigits+"d time: %"+maxDurationDigits+"d -> ", - testNumber, repetition+1, pt.config.TestingDaemon, qps, duration) - } else { - 
fmt.Printf("[%d.%"+maxRepetitionDigits+"d] daemon: executes test qps: %"+maxQpsDigits+"d time: %"+maxDurationDigits+"d -> ", - testNumber, repetition+1, qps, duration) - } + fmt.Printf("[%d.%"+maxRepetitionDigits+"d] %s: executes test qps: %"+maxQpsDigits+"d time: %"+maxDurationDigits+"d -> ", + testNumber, repetition+1, pt.config.TestingClient, qps, duration) // Load targets from pattern file targets, err := pt.loadTargets(pattern) @@ -742,10 +738,7 @@ func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, nam // Check if the server is still alive during the test if pt.config.CheckServerAlive { - var serverName string - serverName = ServerName - - if !IsProcessRunning(serverName) { + if !IsProcessRunning(pt.config.TestingClient) { fmt.Println("test failed: server is Dead") return fmt.Errorf("server died during test") } @@ -876,7 +869,7 @@ func (pt *PerfTest) ExecuteSequence(ctx context.Context, sequence []TestSequence // Get pattern to extract port information var pattern string - pattern = VegetaPatternBase + pt.config.TestType + ".txt" + pattern = VegetaPatternBase + pt.config.TestType + ".txt" // Print port information if file, err := os.Open(pattern); err == nil { @@ -1017,7 +1010,7 @@ func (pt *PerfTest) processResults(testNumber, repetition int, name string, qps, // Write to the test report if enabled if pt.config.CreateTestReport { testMetrics := &TestMetrics{ - DaemonName: name, + ClientName: name, TestNumber: testNumber, Repetition: repetition, QPS: qps, @@ -1125,7 +1118,6 @@ func (tr *TestReport) Open() error { // Initialise the JSON report if needed if tr.config.JSONReportFile != "" { - fmt.Println ("init gen report file") tr.initializeJSONReport(cpuModel, bogomips, kernelVersion, checksum, gccVersion, goVersion) } @@ -1145,7 +1137,7 @@ func (tr *TestReport) createCSVFile() error { csvFolder := tr.hardware.NormalizedVendor() + "_" + extension var csvFolderPath string if tr.config.VersionedTestReport { - csvFolderPath = 
filepath.Join("./reports", tr.config.ChainName, csvFolder) + csvFolderPath = filepath.Join("./perf/reports", tr.config.ChainName, csvFolder) } else { csvFolderPath = filepath.Join(RunTestDirname, tr.config.ChainName, csvFolder) } @@ -1157,9 +1149,9 @@ func (tr *TestReport) createCSVFile() error { // Generate CSV filename timestamp := time.Now().Format("20060102150405") var csvFilename string - if tr.config.TestingDaemon != "" { + if tr.config.TestingClient != "" { csvFilename = fmt.Sprintf("%s_%s_%s_perf.csv", - tr.config.TestType, timestamp, tr.config.TestingDaemon) + tr.config.TestType, timestamp, tr.config.TestingClient) } else { csvFilename = fmt.Sprintf("%s_%s_perf.csv", tr.config.TestType, timestamp) @@ -1217,7 +1209,7 @@ func (tr *TestReport) writeTestHeader(cpuModel, bogomips, kernelVersion, checksu if err != nil { return err } - err = tr.csvWriter.Write(append(emptyRow[:12], "taskset", tr.config.DaemonVegetaOnCore)) + err = tr.csvWriter.Write(append(emptyRow[:12], "taskset", tr.config.ClientVegetaOnCore)) if err != nil { return err } @@ -1248,7 +1240,7 @@ func (tr *TestReport) writeTestHeader(cpuModel, bogomips, kernelVersion, checksu // Write column headers headers := []string{ - "Daemon", "TestNo", "Repetition", "Qps", "Time(secs)", + "ClientName", "TestNo", "Repetition", "Qps", "Time(secs)", "Min", "Mean", "50", "90", "95", "99", "Max", "Ratio", "Error", } err = tr.csvWriter.Write(headers) @@ -1276,13 +1268,13 @@ func (tr *TestReport) initializeJSONReport(cpuModel, bogomips, kernelVersion, ch GoVersion: strings.TrimSpace(goVersion), }, Configuration: ConfigurationInfo{ - TestingDaemon: tr.config.TestingDaemon, + TestingClient: tr.config.TestingClient, TestingAPI: tr.config.TestType, TestSequence: tr.config.TestSequence, TestRepetitions: tr.config.Repetitions, VegetaFile: tr.config.VegetaPatternTarFile, VegetaChecksum: checksum, - Taskset: tr.config.DaemonVegetaOnCore, + Taskset: tr.config.ClientVegetaOnCore, }, Results: []TestResult{}, } @@ -1292,7 
+1284,7 @@ func (tr *TestReport) initializeJSONReport(cpuModel, bogomips, kernelVersion, ch func (tr *TestReport) WriteTestReport(metrics *TestMetrics) error { // Write to CSV row := []string{ - metrics.DaemonName, + metrics.ClientName, strconv.Itoa(metrics.TestNumber), strconv.Itoa(metrics.Repetition), strconv.Itoa(metrics.QPS), @@ -1315,7 +1307,6 @@ func (tr *TestReport) WriteTestReport(metrics *TestMetrics) error { // Write to JSON if enabled if tr.config.JSONReportFile != "" { - fmt.Println ("write Test Report") if err := tr.writeTestReportToJSON(metrics); err != nil { return fmt.Errorf("failed to write JSON report: %w", err) } @@ -1541,9 +1532,10 @@ func main() { Usage: "Maximum number of connections", }, &cli.StringFlag{ - Name: "testing-daemon", + Name: "testing-client", Aliases: []string{"D"}, - Usage: "Name of testing daemon", + Value: DefaultClientName, + Usage: "Name of testing client", }, &cli.StringFlag{ Name: "blockchain", @@ -1582,15 +1574,15 @@ func main() { Usage: "Wait time between test iterations in seconds", }, &cli.StringFlag{ - Name: "rpc-daemon-address", + Name: "rpc-client-address", Aliases: []string{"d"}, Value: DefaultServerAddress, - Usage: "RPC daemon address (e.g., 192.2.3.1)", + Usage: "Client address (e.g., 192.2.3.1)", }, &cli.StringFlag{ Name: "run-vegeta-on-core", Aliases: []string{"c"}, - Value: DefaultDaemonVegetaOnCore, + Value: DefaultClientVegetaOnCore, Usage: "Taskset format for Vegeta (e.g., 0-1:2-3)", }, &cli.StringFlag{ @@ -1648,15 +1640,15 @@ func runPerfTests(c *cli.Context) error { config.EmptyCache = c.Bool("empty-cache") config.MaxConnection = c.String("max-connections") - config.TestingDaemon = c.String("testing-daemon") + config.TestingClient = c.String("testing-client") config.ChainName = c.String("blockchain") config.TestType = c.String("test-type") config.VegetaPatternTarFile = c.String("pattern-file") config.Repetitions = c.Int("repetitions") config.TestSequence = c.String("test-sequence") config.WaitingTime = 
c.Int("wait-after-test-sequence") - config.RPCDaemonAddress = c.String("rpc-daemon-address") - config.DaemonVegetaOnCore = c.String("run-vegeta-on-core") + config.ClientAddress = c.String("rpc-client-address") + config.ClientVegetaOnCore = c.String("run-vegeta-on-core") config.VegetaResponseTimeout = c.String("response-timeout") config.MaxBodyRsp = c.String("max-body-rsp") config.JSONReportFile = c.String("json-report") @@ -1710,7 +1702,7 @@ func runPerfTests(c *cli.Context) error { // Create context ctx := context.Background() - if err := perfTest.ExecuteSequence(ctx, sequence, ServerName); err != nil { + if err := perfTest.ExecuteSequence(ctx, sequence, config.TestingClient); err != nil { fmt.Printf("Performance Test failed, error: %v\n", err) return err } From 2f2964326275074354504317a0f7eee5aaf839b2 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Wed, 24 Dec 2025 12:15:12 +0100 Subject: [PATCH 47/87] add again comit version but optional --- cmd/perf/main.go | 69 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 49 insertions(+), 20 deletions(-) diff --git a/cmd/perf/main.go b/cmd/perf/main.go index 08843858..b3d29eb1 100644 --- a/cmd/perf/main.go +++ b/cmd/perf/main.go @@ -38,6 +38,7 @@ const ( DefaultVegetaResponseTimeout = "300s" DefaultMaxBodyRsp = "1500" DefaultClientName = "rpcdaemon" + DefaultClientBuildDir = "" BinaryDir = "bin" ) @@ -64,6 +65,7 @@ func init() { type Config struct { VegetaPatternTarFile string ClientVegetaOnCore string + ClientBuildDir string Repetitions int TestSequence string ClientAddress string @@ -94,6 +96,7 @@ func NewConfig() *Config { return &Config{ VegetaPatternTarFile: DefaultVegetaPatternTarFile, ClientVegetaOnCore: DefaultClientVegetaOnCore, + ClientBuildDir: DefaultClientBuildDir, Repetitions: DefaultRepetitions, TestSequence: DefaultTestSequence, ClientAddress: DefaultServerAddress, @@ -126,6 +129,12 @@ func (c *Config) Validate() error { return fmt.Errorf("with 
json-report must also set testing-client") } + if c.ClientBuildDir != "" { + if _, err := os.Stat(c.ClientBuildDir); c.ClientBuildDir != "" && os.IsNotExist(err) { + return fmt.Errorf("client build dir not specified correctly: %s", c.ClientBuildDir) + } + } + if c.EmptyCache { currentUser, err := user.Current() if err != nil { @@ -213,14 +222,15 @@ type JSONReport struct { // PlatformInfo holds platform hardware and software information type PlatformInfo struct { - Vendor string `json:"vendor"` - Product string `json:"product"` - Board string `json:"board"` - CPU string `json:"cpu"` - Bogomips string `json:"bogomips"` - Kernel string `json:"kernel"` - GCCVersion string `json:"gccVersion"` - GoVersion string `json:"goVersion"` + Vendor string `json:"vendor"` + Product string `json:"product"` + Board string `json:"board"` + CPU string `json:"cpu"` + Bogomips string `json:"bogomips"` + Kernel string `json:"kernel"` + GCCVersion string `json:"gccVersion"` + GoVersion string `json:"goVersion"` + ClientCommit string `json:"clientCommit"` } // ConfigurationInfo holds test configuration information @@ -1110,16 +1120,23 @@ func (tr *TestReport) Open() error { cpuModel := tr.hardware.GetCPUModel() bogomips := tr.hardware.GetBogomips() + var clientCommit string + if tr.config.ClientBuildDir != "" { + clientCommit = GetGitCommit(tr.config.ClientBuildDir) + } else { + clientCommit = "none" + } + // Write headers if err := tr.writeTestHeader(cpuModel, bogomips, kernelVersion, checksum, - gccVersion, goVersion); err != nil { + gccVersion, goVersion, clientCommit); err != nil { return fmt.Errorf("failed to write test header: %w", err) } // Initialise the JSON report if needed if tr.config.JSONReportFile != "" { tr.initializeJSONReport(cpuModel, bogomips, kernelVersion, checksum, - gccVersion, goVersion) + gccVersion, goVersion, clientCommit) } return nil @@ -1174,7 +1191,7 @@ func (tr *TestReport) createCSVFile() error { } // writeTestHeader writes the test configuration header to 
CSV -func (tr *TestReport) writeTestHeader(cpuModel, bogomips, kernelVersion, checksum, gccVersion, goVersion string) error { +func (tr *TestReport) writeTestHeader(cpuModel, bogomips, kernelVersion, checksum, gccVersion, goVersion, clientCommit string) error { // Write platform information emptyRow := make([]string, 14) @@ -1229,6 +1246,10 @@ func (tr *TestReport) writeTestHeader(cpuModel, bogomips, kernelVersion, checksu if err != nil { return err } + err = tr.csvWriter.Write(append(emptyRow[:12], "clientVersion", clientCommit)) + if err != nil { + return err + } // Empty rows for range 2 { @@ -1254,18 +1275,19 @@ func (tr *TestReport) writeTestHeader(cpuModel, bogomips, kernelVersion, checksu // initializeJSONReport initializes the JSON report structure func (tr *TestReport) initializeJSONReport(cpuModel, bogomips, kernelVersion, checksum, - gccVersion, goVersion string) { + gccVersion, goVersion, clientCommit string) { tr.jsonReport = &JSONReport{ Platform: PlatformInfo{ - Vendor: strings.TrimSpace(tr.hardware.Vendor()), - Product: strings.TrimSpace(tr.hardware.Product()), - Board: strings.TrimSpace(tr.hardware.Board()), - CPU: strings.TrimSpace(cpuModel), - Bogomips: strings.TrimSpace(bogomips), - Kernel: strings.TrimSpace(kernelVersion), - GCCVersion: strings.TrimSpace(gccVersion), - GoVersion: strings.TrimSpace(goVersion), + Vendor: strings.TrimSpace(tr.hardware.Vendor()), + Product: strings.TrimSpace(tr.hardware.Product()), + Board: strings.TrimSpace(tr.hardware.Board()), + CPU: strings.TrimSpace(cpuModel), + Bogomips: strings.TrimSpace(bogomips), + Kernel: strings.TrimSpace(kernelVersion), + GCCVersion: strings.TrimSpace(gccVersion), + GoVersion: strings.TrimSpace(goVersion), + ClientCommit: strings.TrimSpace(clientCommit), }, Configuration: ConfigurationInfo{ TestingClient: tr.config.TestingClient, @@ -1579,6 +1601,12 @@ func main() { Value: DefaultServerAddress, Usage: "Client address (e.g., 192.2.3.1)", }, + &cli.StringFlag{ + Name: "client-build-dir", 
+ Aliases: []string{"g"}, + Value: DefaultClientBuildDir, + Usage: "Path to Client build folder", + }, &cli.StringFlag{ Name: "run-vegeta-on-core", Aliases: []string{"c"}, @@ -1648,6 +1676,7 @@ func runPerfTests(c *cli.Context) error { config.TestSequence = c.String("test-sequence") config.WaitingTime = c.Int("wait-after-test-sequence") config.ClientAddress = c.String("rpc-client-address") + config.ClientBuildDir = c.String("client-build-dir") config.ClientVegetaOnCore = c.String("run-vegeta-on-core") config.VegetaResponseTimeout = c.String("response-timeout") config.MaxBodyRsp = c.String("max-body-rsp") From 82817cc30a8813f67ebbe3463dca3d10fef388d0 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Wed, 24 Dec 2025 15:22:53 +0100 Subject: [PATCH 48/87] tests read all pattern --- cmd/perf/main.go | 96 +++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 79 insertions(+), 17 deletions(-) diff --git a/cmd/perf/main.go b/cmd/perf/main.go index b3d29eb1..d62e126d 100644 --- a/cmd/perf/main.go +++ b/cmd/perf/main.go @@ -5,6 +5,7 @@ import ( "bufio" "bytes" "compress/bzip2" + "compress/gzip" "context" "encoding/csv" "encoding/json" @@ -484,6 +485,53 @@ type PerfTest struct { testReport *TestReport } +// Supported compression types +const ( + GzipCompression = ".gz" + Bzip2Compression = ".bz2" + NoCompression = "" +) + +// getCompressionType determines the compression from the filename extension. 
+func getCompressionType(filename string) string { + if strings.HasSuffix(filename, ".tar.gz") || strings.HasSuffix(filename, ".tgz") { + return GzipCompression + } + if strings.HasSuffix(filename, ".tar.bz2") || strings.HasSuffix(filename, ".tbz") { + return Bzip2Compression + } + return NoCompression +} + +func autodetectCompression(inFile *os.File) (string, error) { + // Assume we have no compression and try to detect it if the tar header is invalid + compressionType := NoCompression + tarReader := tar.NewReader(inFile) + _, err := tarReader.Next() + if err != nil && !errors.Is(err, io.EOF) { + // Reset the file position for read and check if it's gzip encoded + _, err = inFile.Seek(0, io.SeekStart) + if err != nil { + return compressionType, err + } + _, err = gzip.NewReader(inFile) + if err == nil { + compressionType = GzipCompression + } else { + // Reset the file position for read and check if it's gzip encoded + _, err = inFile.Seek(0, io.SeekStart) + if err != nil { + return compressionType, err + } + _, err = tar.NewReader(bzip2.NewReader(inFile)).Next() + if err == nil { + compressionType = Bzip2Compression + } + } + } + return compressionType, nil +} + // NewPerfTest creates a new performance test instance func NewPerfTest(config *Config, testReport *TestReport) (*PerfTest, error) { pt := &PerfTest{ @@ -615,29 +663,42 @@ func (pt *PerfTest) copyFile(src, dst string) error { // extractTarGz extracts a tar.gz file to a destination directory func (pt *PerfTest) extractTarGz(tarFile, destDir string) error { + // Open the archive file file, err := os.Open(tarFile) if err != nil { - return err + return fmt.Errorf("failed to open archive: %w", err) } - defer func(file *os.File) { - err := file.Close() - if err != nil { - log.Printf("Warning: failed to close tar file: %v", err) - } + defer func(inFile *os.File) { + _ = inFile.Close() }(file) - /*gzr, err := gzip.NewReader(file) - if err != nil { - return err - } - defer func(gzr *gzip.Reader) { - err := 
gzr.Close() + // Wrap the input file with the correct compression reader + compressionType := getCompressionType(tarFile) + if compressionType == NoCompression { + // Possibly handle the corner case where the file is compressed but has tar extension + compressionType, err = autodetectCompression(file) if err != nil { - log.Printf("Warning: failed to close gzip reader: %v", err) + return fmt.Errorf("failed to autodetect compression for archive: %w", err) } - }(gzr)*/ + file, err = os.Open(tarFile) + if err != nil { + return err + } + } + + var reader io.Reader + switch compressionType { + case GzipCompression: + if reader, err = gzip.NewReader(file); err != nil { + return fmt.Errorf("failed to create gzip reader: %w", err) + } + case Bzip2Compression: + reader = bzip2.NewReader(file) + case NoCompression: + reader = file + } - tr := tar.NewReader(bzip2.NewReader(file)) + tr := tar.NewReader(reader) for { header, err := tr.Next() @@ -771,10 +832,11 @@ func (pt *PerfTest) loadTargets(filepath string) ([]vegeta.Target, error) { } }(file) + const maxCapacity = 1024 * 1024 // 1MB var targets []vegeta.Target scanner := bufio.NewScanner(file) - buffer := make([]byte, 0, 256*1024) - scanner.Buffer(buffer, cap(buffer)) + buffer := make([]byte, 0, maxCapacity) + scanner.Buffer(buffer, maxCapacity) for scanner.Scan() { line := scanner.Text() From bb38c0c922fd0257be3ac5b1fad7604d53155445 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Thu, 25 Dec 2025 09:36:33 +0100 Subject: [PATCH 49/87] permit to disable http compression --- cmd/perf/main.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/cmd/perf/main.go b/cmd/perf/main.go index d62e126d..b32195e4 100644 --- a/cmd/perf/main.go +++ b/cmd/perf/main.go @@ -90,6 +90,7 @@ type Config struct { MorePercentiles bool InstantReport bool HaltOnVegetaError bool + DisableHttpCompression bool } // NewConfig creates a new Config with default values @@ -121,6 +122,7 @@ func 
NewConfig() *Config { MorePercentiles: false, InstantReport: false, HaltOnVegetaError: false, + DisableHttpCompression: false, } } @@ -894,7 +896,17 @@ func (pt *PerfTest) runVegetaAttack(ctx context.Context, targets []vegeta.Target maxConnInt, _ := strconv.Atoi(pt.config.MaxConnection) maxBodyInt, _ := strconv.Atoi(pt.config.MaxBodyRsp) + tr := &http.Transport{ + DisableCompression: pt.config.DisableHttpCompression, + Proxy: http.ProxyFromEnvironment, + } + + customClient := &http.Client{ + Transport: tr, + } + attacker := vegeta.NewAttacker( + vegeta.Client(customClient), vegeta.Timeout(timeout), vegeta.Workers(uint64(maxConnInt)), vegeta.MaxBody(int64(maxBodyInt)), @@ -1579,6 +1591,11 @@ func main() { Name: "rpc_perf", Usage: "Launch an automated sequence of RPC performance tests on on target blockchain node(s)", Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "disable-http-compression", + Aliases: []string{"O"}, + Usage: "Disable Http compression", + }, &cli.BoolFlag{ Name: "not-verify-server-alive", Aliases: []string{"Z"}, @@ -1722,6 +1739,7 @@ func runPerfTests(c *cli.Context) error { // Create configuration from CLI flags config := NewConfig() + config.DisableHttpCompression = c.Bool("disable-http-compression") config.CheckServerAlive = !c.Bool("not-verify-server-alive") config.CreateTestReport = c.Bool("tmp-test-report") || c.Bool("test-report") config.VersionedTestReport = c.Bool("test-report") From 31b39c3e9fc189fc402437546e1098a99f244577 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Mon, 5 Jan 2026 15:37:26 +0100 Subject: [PATCH 50/87] fix --- integration/run_tests.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/integration/run_tests.py b/integration/run_tests.py index 92adc18c..b9f4b6ec 100755 --- a/integration/run_tests.py +++ b/integration/run_tests.py @@ -727,21 +727,10 @@ def run_compare(use_jsondiff, error_file, temp_file1, temp_file2, diff_file, tes idx += 1 
time.sleep(TIME) # verify if json-diff or diff in progress - cmd = "ps aux | grep -v run_tests | grep 'diff' | grep -v 'grep' | grep test_" + str(test_number) + " | awk '{print $2}'" + cmd = "ps aux | grep -v run_tests | grep 'diff' | grep -v 'grep' | awk '{print $2}'" pid = os.popen(cmd).read() if pid == "": # json-diff or diff terminated - error_file_size = os.stat(error_file).st_size - if error_file_size != 0: - if already_failed: - # timeout with json-diff and diff so return timeout->0 - return 0 - already_failed = True - # try json diffs with diff - cmd = "diff " + temp_file2 + " " + temp_file1 + " > " + diff_file + " 2> " + error_file + " &" - os.system(cmd) - idx = 0 - continue return 1 if idx >= MAX_TIME: killing_pid = pid.strip() From 54a90a9ef6fd68792987b9e2da9d4cccc7ad8705 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 9 Jan 2026 18:35:03 +0100 Subject: [PATCH 51/87] integration: add json-diff impl --- cmd/integration/jsondiff/diff.go | 470 +++++++++++++++++++++++++++++++ cmd/integration/main.go | 82 +++++- 2 files changed, 541 insertions(+), 11 deletions(-) create mode 100644 cmd/integration/jsondiff/diff.go diff --git a/cmd/integration/jsondiff/diff.go b/cmd/integration/jsondiff/diff.go new file mode 100644 index 00000000..f6933a05 --- /dev/null +++ b/cmd/integration/jsondiff/diff.go @@ -0,0 +1,470 @@ +package jsondiff + +import ( + "encoding/json" + "fmt" + "reflect" + "sort" + "strings" +) + +// DiffType represents the type of difference +type DiffType string + +const ( + DiffAdd DiffType = "add" + DiffDelete DiffType = "delete" + DiffUpdate DiffType = "update" + DiffEqual DiffType = "equal" +) + +// Diff represents a single difference +type Diff struct { + Type DiffType + Path string + OldValue interface{} + NewValue interface{} +} + +// Options configures the diff behavior +type Options struct { + // Full causes all unchanged values to be included in output + Full bool + // KeepUnchangedValues includes 
unchanged values in the diff result + KeepUnchangedValues bool + // OutputKeys are the keys to include in the output + OutputKeys []string + // Sort keys in output + Sort bool + // SortArrays sorts primitive values in arrays before comparing + SortArrays bool +} + +// DiffJSON computes the difference between two JSON objects +func DiffJSON(obj1, obj2 interface{}, opts *Options) map[string]interface{} { + if opts == nil { + opts = &Options{} + } + + result := make(map[string]interface{}) + diff(obj1, obj2, "", result, opts) + + return result +} + +// DiffString returns a human-readable string representation of differences +func DiffString(obj1, obj2 interface{}, opts *Options) string { + if opts == nil { + opts = &Options{} + } + + diffs := collectDiffs(obj1, obj2, "") + + var sb strings.Builder + for _, d := range diffs { + switch d.Type { + case DiffAdd: + sb.WriteString(fmt.Sprintf("+ %s: %v\n", d.Path, formatValue(d.NewValue))) + case DiffDelete: + sb.WriteString(fmt.Sprintf("- %s: %v\n", d.Path, formatValue(d.OldValue))) + case DiffUpdate: + sb.WriteString(fmt.Sprintf("~ %s: %v -> %v\n", d.Path, formatValue(d.OldValue), formatValue(d.NewValue))) + case DiffEqual: + if opts.Full { + sb.WriteString(fmt.Sprintf(" %s: %v\n", d.Path, formatValue(d.NewValue))) + } + } + } + + return sb.String() +} + +// ColoredString returns a colored diff string (for terminal output) +func ColoredString(obj1, obj2 interface{}, opts *Options) string { + if opts == nil { + opts = &Options{} + } + + diffs := collectDiffs(obj1, obj2, "") + + const ( + colorReset = "\033[0m" + colorRed = "\033[31m" + colorGreen = "\033[32m" + colorYellow = "\033[33m" + ) + + var sb strings.Builder + for _, d := range diffs { + switch d.Type { + case DiffAdd: + sb.WriteString(fmt.Sprintf("%s+ %s: %v%s\n", colorGreen, d.Path, formatValue(d.NewValue), colorReset)) + case DiffDelete: + sb.WriteString(fmt.Sprintf("%s- %s: %v%s\n", colorRed, d.Path, formatValue(d.OldValue), colorReset)) + case DiffUpdate: + 
sb.WriteString(fmt.Sprintf("%s~ %s: %v -> %v%s\n", colorYellow, d.Path, formatValue(d.OldValue), formatValue(d.NewValue), colorReset)) + case DiffEqual: + if opts.Full { + sb.WriteString(fmt.Sprintf(" %s: %v\n", d.Path, formatValue(d.NewValue))) + } + } + } + + return sb.String() +} + +func diff(obj1, obj2 interface{}, path string, result map[string]interface{}, opts *Options) { + // Handle nil cases + if obj1 == nil && obj2 == nil { + if opts.KeepUnchangedValues { + result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + } + return + } + + if obj1 == nil { + result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + return + } + + if obj2 == nil { + result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + return + } + + v1 := reflect.ValueOf(obj1) + v2 := reflect.ValueOf(obj2) + + // If types are different, mark as changed + if v1.Kind() != v2.Kind() { + result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + return + } + + switch v1.Kind() { + case reflect.Map: + diffMaps(obj1, obj2, path, result, opts) + case reflect.Slice, reflect.Array: + diffArrays(obj1, obj2, path, result, opts) + default: + if !reflect.DeepEqual(obj1, obj2) { + result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + } else if opts.KeepUnchangedValues { + result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + } + } +} + +func diffMaps(obj1, obj2 interface{}, path string, result map[string]interface{}, opts *Options) { + m1, ok1 := obj1.(map[string]interface{}) + m2, ok2 := obj2.(map[string]interface{}) + + if !ok1 || !ok2 { + result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + return + } + + // Collect all keys + allKeys := make(map[string]bool) + for k := range m1 { + allKeys[k] = true + } + for k := range m2 { + allKeys[k] = true + } + + keys := make([]string, 0, len(allKeys)) + for k := range allKeys { + keys = append(keys, k) + } + + if opts.Sort { + sort.Strings(keys) + } + + for _, key := 
range keys { + v1, exists1 := m1[key] + v2, exists2 := m2[key] + + newPath := key + if path != "" { + newPath = path + "." + key + } + + if !exists1 { + result[newPath] = map[string]interface{}{"__new": v2} + } else if !exists2 { + result[newPath] = map[string]interface{}{"__old": v1} + } else { + diff(v1, v2, newPath, result, opts) + } + } +} + +func diffArrays(obj1, obj2 interface{}, path string, result map[string]interface{}, opts *Options) { + v1 := reflect.ValueOf(obj1) + v2 := reflect.ValueOf(obj2) + + // Sort arrays if option is enabled + arr1 := obj1 + arr2 := obj2 + + if opts.SortArrays { + arr1 = sortArrayIfPrimitive(obj1) + arr2 = sortArrayIfPrimitive(obj2) + v1 = reflect.ValueOf(arr1) + v2 = reflect.ValueOf(arr2) + } + + len1 := v1.Len() + len2 := v2.Len() + + maxLen := len1 + if len2 > maxLen { + maxLen = len2 + } + + for i := 0; i < maxLen; i++ { + newPath := fmt.Sprintf("%s[%d]", path, i) + + if i >= len1 { + result[newPath] = map[string]interface{}{"__new": v2.Index(i).Interface()} + } else if i >= len2 { + result[newPath] = map[string]interface{}{"__old": v1.Index(i).Interface()} + } else { + diff(v1.Index(i).Interface(), v2.Index(i).Interface(), newPath, result, opts) + } + } +} + +func collectDiffs(obj1, obj2 interface{}, path string) []Diff { + var diffs []Diff + collectDiffsRec(obj1, obj2, path, &diffs) + return diffs +} + +func collectDiffsRec(obj1, obj2 interface{}, path string, diffs *[]Diff) { + if obj1 == nil && obj2 == nil { + *diffs = append(*diffs, Diff{Type: DiffEqual, Path: path, NewValue: obj2}) + return + } + + if obj1 == nil { + *diffs = append(*diffs, Diff{Type: DiffAdd, Path: path, NewValue: obj2}) + return + } + + if obj2 == nil { + *diffs = append(*diffs, Diff{Type: DiffDelete, Path: path, OldValue: obj1}) + return + } + + v1 := reflect.ValueOf(obj1) + v2 := reflect.ValueOf(obj2) + + if v1.Kind() != v2.Kind() { + *diffs = append(*diffs, Diff{Type: DiffUpdate, Path: path, OldValue: obj1, NewValue: obj2}) + return + } + + switch 
v1.Kind() { + case reflect.Map: + collectMapDiffs(obj1, obj2, path, diffs) + case reflect.Slice, reflect.Array: + collectArrayDiffs(obj1, obj2, path, diffs) + default: + if !reflect.DeepEqual(obj1, obj2) { + *diffs = append(*diffs, Diff{Type: DiffUpdate, Path: path, OldValue: obj1, NewValue: obj2}) + } else { + *diffs = append(*diffs, Diff{Type: DiffEqual, Path: path, NewValue: obj2}) + } + } +} + +func collectMapDiffs(obj1, obj2 interface{}, path string, diffs *[]Diff) { + m1, ok1 := obj1.(map[string]interface{}) + m2, ok2 := obj2.(map[string]interface{}) + + if !ok1 || !ok2 { + *diffs = append(*diffs, Diff{Type: DiffUpdate, Path: path, OldValue: obj1, NewValue: obj2}) + return + } + + allKeys := make(map[string]bool) + for k := range m1 { + allKeys[k] = true + } + for k := range m2 { + allKeys[k] = true + } + + keys := make([]string, 0, len(allKeys)) + for k := range allKeys { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, key := range keys { + v1, exists1 := m1[key] + v2, exists2 := m2[key] + + newPath := key + if path != "" { + newPath = path + "." 
+ key + } + + if !exists1 { + *diffs = append(*diffs, Diff{Type: DiffAdd, Path: newPath, NewValue: v2}) + } else if !exists2 { + *diffs = append(*diffs, Diff{Type: DiffDelete, Path: newPath, OldValue: v1}) + } else { + collectDiffsRec(v1, v2, newPath, diffs) + } + } +} + +func collectArrayDiffs(obj1, obj2 interface{}, path string, diffs *[]Diff) { + v1 := reflect.ValueOf(obj1) + v2 := reflect.ValueOf(obj2) + + len1 := v1.Len() + len2 := v2.Len() + + maxLen := len1 + if len2 > maxLen { + maxLen = len2 + } + + for i := 0; i < maxLen; i++ { + newPath := fmt.Sprintf("%s[%d]", path, i) + + if i >= len1 { + *diffs = append(*diffs, Diff{Type: DiffAdd, Path: newPath, NewValue: v2.Index(i).Interface()}) + } else if i >= len2 { + *diffs = append(*diffs, Diff{Type: DiffDelete, Path: newPath, OldValue: v1.Index(i).Interface()}) + } else { + collectDiffsRec(v1.Index(i).Interface(), v2.Index(i).Interface(), newPath, diffs) + } + } +} + +func sortArrayIfPrimitive(arr interface{}) interface{} { + v := reflect.ValueOf(arr) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + return arr + } + + if v.Len() == 0 { + return arr + } + + // Check if array contains only primitives + firstElem := v.Index(0).Interface() + if !isPrimitive(firstElem) { + return arr + } + + // Create a copy and sort it + slice := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + slice[i] = v.Index(i).Interface() + } + + sort.Slice(slice, func(i, j int) bool { + return comparePrimitives(slice[i], slice[j]) < 0 + }) + + return slice +} + +func isPrimitive(v interface{}) bool { + if v == nil { + return true + } + + switch v.(type) { + case bool, string, int, int8, int16, int32, int64, + uint, uint8, uint16, uint32, uint64, + float32, float64: + return true + default: + return false + } +} + +func comparePrimitives(a, b interface{}) int { + if a == nil && b == nil { + return 0 + } + if a == nil { + return -1 + } + if b == nil { + return 1 + } + + // Compare by type first + typeA := 
fmt.Sprintf("%T", a) + typeB := fmt.Sprintf("%T", b) + + if typeA != typeB { + return strings.Compare(typeA, typeB) + } + + // Compare by value + switch v := a.(type) { + case bool: + if v == b.(bool) { + return 0 + } + if v { + return 1 + } + return -1 + case string: + return strings.Compare(v, b.(string)) + case int: + bv := b.(int) + if v < bv { + return -1 + } else if v > bv { + return 1 + } + return 0 + case int64: + bv := b.(int64) + if v < bv { + return -1 + } else if v > bv { + return 1 + } + return 0 + case float64: + bv := b.(float64) + if v < bv { + return -1 + } else if v > bv { + return 1 + } + return 0 + default: + // Fallback to string comparison + return strings.Compare(fmt.Sprintf("%v", a), fmt.Sprintf("%v", b)) + } +} + +func formatValue(v interface{}) string { + if v == nil { + return "null" + } + + switch val := v.(type) { + case string: + return fmt.Sprintf(`"%s"`, val) + case map[string]interface{}, []interface{}: + b, _ := json.Marshal(val) + return string(b) + default: + return fmt.Sprintf("%v", val) + } +} diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 5fec668c..d50ba10a 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -30,6 +30,7 @@ import ( "syscall" "time" + "github.com/erigontech/rpc-tests/cmd/integration/jsondiff" "github.com/golang-jwt/jwt/v5" "github.com/gorilla/websocket" "github.com/josephburnett/jd/v2" @@ -286,8 +287,10 @@ func extractArchive(archivePath string, sanitizeExtension bool, metrics *TestMet return nil, fmt.Errorf("archive must contain a single JSON file, found %s", header.Name) } + bufferedReader := bufio.NewReaderSize(tarReader, 8*os.Getpagesize()) + start := time.Now() - if err := jsoniter.NewDecoder(tarReader).Decode(&jsonrpcCommands); err != nil { + if err := jsoniter.NewDecoder(bufferedReader).Decode(&jsonrpcCommands); err != nil { return jsonrpcCommands, errors.New("cannot parse JSON " + archivePath + ": " + err.Error()) } metrics.UnmarshallingTime += time.Since(start) 
@@ -301,10 +304,11 @@ const ( JdLibrary JsonDiffKind = iota JsonDiffTool DiffTool + JsonDiffGo ) func (k JsonDiffKind) String() string { - return [...]string{"jd", "json-diff", "diff"}[k] + return [...]string{"jd", "json-diff", "diff", "json-diff-go"}[k] } // ParseJsonDiffKind converts a string into a JsonDiffKind enum type @@ -316,6 +320,8 @@ func ParseJsonDiffKind(s string) (JsonDiffKind, error) { return JsonDiffTool, nil case "diff": return DiffTool, nil + case "json-diff-go": + return JsonDiffGo, nil default: return JdLibrary, fmt.Errorf("invalid JsonDiffKind value: %s", s) } @@ -368,9 +374,10 @@ type TestMetrics struct { } type TestOutcome struct { - Success bool - Error error - Metrics TestMetrics + Success bool + Error error + ColoredDiff string + Metrics TestMetrics } type TestResult struct { @@ -429,7 +436,7 @@ func NewConfig() *Config { DisplayOnlyFail: false, TransportType: "http", Parallel: true, - DiffKind: JdLibrary, + DiffKind: JsonDiffGo, WithoutCompareResults: false, WaitingTime: 0, DoNotCompareError: false, @@ -1573,11 +1580,57 @@ func (c *JsonRpcCommand) processResponse(response, result1, responseInFile any, return } - same, err := c.compareJSON(config, daemonFile, expRspFile, diffFile, &outcome.Metrics) - if err != nil { - outcome.Error = err - return + var same bool + if config.DiffKind == JsonDiffGo { // TODO: move within compareJSON + outcome.Metrics.ComparisonCount += 1 + opts := &jsondiff.Options{ + SortArrays: true, + } + if respIsMap && expIsMap { + diff := jsondiff.DiffJSON(expectedMap, responseMap, opts) + same = len(diff) == 0 + diffString := jsondiff.DiffString(expectedMap, responseMap, opts) + err = os.WriteFile(diffFile, []byte(diffString), 0644) + if err != nil { + outcome.Error = err + return + } + if !same { + outcome.Error = errDiffMismatch + if config.ReqTestNumber != -1 { // only when a single test is run TODO: add option to control it + outcome.ColoredDiff = jsondiff.ColoredString(expectedMap, responseMap, opts) + } + } + } 
else { + responseArray, respIsArray := response.([]any) + expectedArray, expIsArray := expectedResponse.([]any) + if !respIsArray || !expIsArray { + outcome.Error = errors.New("cannot compare JSON objects (neither maps nor arrays)") + return + } + diff := jsondiff.DiffJSON(expectedArray, responseArray, opts) + same = len(diff) == 0 + diffString := jsondiff.DiffString(expectedArray, responseArray, opts) + err = os.WriteFile(diffFile, []byte(diffString), 0644) + if err != nil { + outcome.Error = err + return + } + if !same { + outcome.Error = errDiffMismatch + if config.ReqTestNumber != -1 { // only when a single test is run TODO: add option to control it + outcome.ColoredDiff = jsondiff.ColoredString(expectedArray, responseArray, opts) + } + } + } + } else { + same, err = c.compareJSON(config, daemonFile, expRspFile, diffFile, &outcome.Metrics) + if err != nil { + outcome.Error = err + return + } } + if same && !config.ForceDumpJSONs { err := os.Remove(daemonFile) if err != nil { @@ -1754,7 +1807,14 @@ func (c *ResultCollector) start(ctx context.Context, cancelCtx context.CancelFun c.totalEqualCount += result.Outcome.Metrics.EqualCount } else { c.failedTests++ - fmt.Printf("failed: %s\n", result.Outcome.Error.Error()) + if result.Outcome.Error != nil { + fmt.Printf("failed: %s\n", result.Outcome.Error.Error()) + if errors.Is(result.Outcome.Error, errDiffMismatch) && result.Outcome.ColoredDiff != "" { + fmt.Printf(result.Outcome.ColoredDiff) + } + } else { + fmt.Printf("failed: no error\n") + } if c.config.ExitOnFail { // Signal other tasks to stop and exit cancelCtx() From 57c9c2fcfc12b5556bb30a61235d1b682bf79c6a Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sat, 10 Jan 2026 17:52:34 +0100 Subject: [PATCH 52/87] add print on failed --- cmd/integration/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index d50ba10a..0408bb82 100644 --- a/cmd/integration/main.go 
+++ b/cmd/integration/main.go @@ -1945,6 +1945,7 @@ func runMain() int { var retryDelay = 1 * time.Second latestBlock, err := getConsistentLatestBlock(config, server1, config.ExternalProviderURL, maxRetries, retryDelay) if err != nil { + fmt.Printf("sync on latest block number failed ",err) return -1 // TODO: unique return codes? } if config.VerboseLevel > 0 { From d010c9b281335324481ec5e281d3c0f1f3294723 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sat, 10 Jan 2026 17:54:43 +0100 Subject: [PATCH 53/87] add print on failed --- cmd/integration/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 0408bb82..fd34f70f 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1945,7 +1945,7 @@ func runMain() int { var retryDelay = 1 * time.Second latestBlock, err := getConsistentLatestBlock(config, server1, config.ExternalProviderURL, maxRetries, retryDelay) if err != nil { - fmt.Printf("sync on latest block number failed ",err) + fmt.Println("sync on latest block number failed ",err) return -1 // TODO: unique return codes? 
} if config.VerboseLevel > 0 { From a89814ff048b52085e1edd40f442d9f6f8532441 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sat, 10 Jan 2026 20:13:33 +0100 Subject: [PATCH 54/87] fix getLatest --- cmd/integration/main.go | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index fd34f70f..67cf0c54 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1123,18 +1123,30 @@ func getLatestBlockNumber(ctx context.Context, config *Config, url string, metri } requestBytes, _ := jsoniter.Marshal(request) - var response JsonRpcResponse - err := executeHttpRequest(ctx, config, "http", "", url, requestBytes, response, metrics) + var response any + err := executeHttpRequest(ctx, config, "http", "", url, requestBytes, &response, metrics) if err != nil { return 0, err } - if response.Error != nil { - return 0, fmt.Errorf("RPC error: %s", response.Error.Message) + responseMap, ok := response.(map[string]interface{}) + if !ok { + return 0, fmt.Errorf("response is not a map: %v", response) } + if resultVal, hasResult := responseMap["result"]; hasResult { + resultStr, isString := resultVal.(string) + if !isString { + return 0, fmt.Errorf("result is not a string: %v", resultVal) + } - result := strings.TrimPrefix(response.Result, "0x") - return strconv.ParseUint(result, 16, 64) + cleanHex := strings.TrimPrefix(resultStr, "0x") + fmt.Println("ret:", cleanHex) + return strconv.ParseUint(cleanHex, 16, 64) + } + if errorVal, hasError := responseMap["error"]; hasError { + return 0, fmt.Errorf("RPC error: %v", errorVal) + } + return 0, fmt.Errorf("no result or error found in response") } func getConsistentLatestBlock(config *Config, server1URL, server2URL string, maxRetries int, retryDelay time.Duration) (uint64, error) { @@ -1945,7 +1957,7 @@ func runMain() int { var retryDelay = 1 * time.Second latestBlock, err := 
getConsistentLatestBlock(config, server1, config.ExternalProviderURL, maxRetries, retryDelay) if err != nil { - fmt.Println("sync on latest block number failed ",err) + fmt.Println("sync on latest block number failed ", err) return -1 // TODO: unique return codes? } if config.VerboseLevel > 0 { From 5b18f6816f15ed306b7c130d3aa8b0e9491a03a8 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Sat, 10 Jan 2026 20:14:32 +0100 Subject: [PATCH 55/87] remove log --- cmd/integration/main.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 67cf0c54..a1078541 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1140,7 +1140,6 @@ func getLatestBlockNumber(ctx context.Context, config *Config, url string, metri } cleanHex := strings.TrimPrefix(resultStr, "0x") - fmt.Println("ret:", cleanHex) return strconv.ParseUint(cleanHex, 16, 64) } if errorVal, hasError := responseMap["error"]; hasError { From d79464b41bfbde2aa29c01ad55127b31119b28a5 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Mon, 12 Jan 2026 11:37:11 +0100 Subject: [PATCH 56/87] integration: add json-diff tests --- cmd/integration/jsondiff/diff.go | 10 +- cmd/integration/jsondiff/diff_test.go | 769 ++++++++++++++++++++++++++ 2 files changed, 774 insertions(+), 5 deletions(-) create mode 100644 cmd/integration/jsondiff/diff_test.go diff --git a/cmd/integration/jsondiff/diff.go b/cmd/integration/jsondiff/diff.go index f6933a05..b6f71cdd 100644 --- a/cmd/integration/jsondiff/diff.go +++ b/cmd/integration/jsondiff/diff.go @@ -26,15 +26,15 @@ type Diff struct { NewValue interface{} } -// Options configures the diff behavior +// Options configures the diff behaviour type Options struct { - // Full causes all unchanged values to be included in output + // Full causes all unchanged values to be included in the output Full bool // KeepUnchangedValues includes unchanged 
values in the diff result KeepUnchangedValues bool // OutputKeys are the keys to include in the output OutputKeys []string - // Sort keys in output + // Sort keys in the output Sort bool // SortArrays sorts primitive values in arrays before comparing SortArrays bool @@ -205,7 +205,7 @@ func diffArrays(obj1, obj2 interface{}, path string, result map[string]interface v1 := reflect.ValueOf(obj1) v2 := reflect.ValueOf(obj2) - // Sort arrays if option is enabled + // Sort arrays if required arr1 := obj1 arr2 := obj2 @@ -358,7 +358,7 @@ func sortArrayIfPrimitive(arr interface{}) interface{} { return arr } - // Check if array contains only primitives + // Check that the array contains only primitives firstElem := v.Index(0).Interface() if !isPrimitive(firstElem) { return arr diff --git a/cmd/integration/jsondiff/diff_test.go b/cmd/integration/jsondiff/diff_test.go new file mode 100644 index 00000000..4d836c1d --- /dev/null +++ b/cmd/integration/jsondiff/diff_test.go @@ -0,0 +1,769 @@ +package jsondiff + +import ( + "encoding/json" + "strings" + "testing" +) + +func TestDiffJSON_NilInputs(t *testing.T) { + tests := []struct { + name string + obj1 any + obj2 any + opts *Options + }{ + {"both nil", nil, nil, nil}, + {"first nil", nil, map[string]any{"a": 1}, nil}, + {"second nil", map[string]any{"a": 1}, nil, nil}, + {"both nil with keep unchanged", nil, nil, &Options{KeepUnchangedValues: true}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := DiffJSON(tt.obj1, tt.obj2, tt.opts) + if result == nil { + t.Error("expected non-nil result") + } + }) + } +} + +func TestDiffJSON_PrimitiveValues(t *testing.T) { + tests := []struct { + name string + obj1 any + obj2 any + expectDiff bool + keepUnchanged bool + }{ + {"equal strings", "hello", "hello", false, false}, + {"different strings", "hello", "world", true, false}, + {"equal numbers", 42.0, 42.0, false, false}, + {"different numbers", 42.0, 43.0, true, false}, + {"equal bools", true, true, 
false, false}, + {"different bools", true, false, true, false}, + {"keep unchanged equal", "hello", "hello", false, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + opts := &Options{KeepUnchangedValues: tt.keepUnchanged} + result := DiffJSON(tt.obj1, tt.obj2, opts) + hasDiff := len(result) > 0 + if tt.expectDiff && !hasDiff && !tt.keepUnchanged { + t.Error("expected diff but got none") + } + }) + } +} + +func TestDiffJSON_DifferentTypes(t *testing.T) { + result := DiffJSON("string", 42, nil) + if len(result) == 0 { + t.Error("expected diff for different types") + } +} + +func TestDiffJSON_Maps(t *testing.T) { + tests := []struct { + name string + obj1 map[string]any + obj2 map[string]any + opts *Options + }{ + { + "equal maps", + map[string]any{"a": 1, "b": 2}, + map[string]any{"a": 1, "b": 2}, + nil, + }, + { + "added key", + map[string]any{"a": 1}, + map[string]any{"a": 1, "b": 2}, + nil, + }, + { + "removed key", + map[string]any{"a": 1, "b": 2}, + map[string]any{"a": 1}, + nil, + }, + { + "changed value", + map[string]any{"a": 1}, + map[string]any{"a": 2}, + nil, + }, + { + "sorted keys", + map[string]any{"b": 1, "a": 2}, + map[string]any{"a": 2, "b": 1}, + &Options{Sort: true}, + }, + { + "nested maps", + map[string]any{"a": map[string]any{"b": 1}}, + map[string]any{"a": map[string]any{"b": 2}}, + nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := DiffJSON(tt.obj1, tt.obj2, tt.opts) + if result == nil { + t.Error("expected non-nil result") + } + }) + } +} + +func TestDiffJSON_Arrays(t *testing.T) { + tests := []struct { + name string + obj1 any + obj2 any + opts *Options + }{ + { + "equal arrays", + []any{1, 2, 3}, + []any{1, 2, 3}, + nil, + }, + { + "different arrays", + []any{1, 2, 3}, + []any{1, 2, 4}, + nil, + }, + { + "longer second array", + []any{1, 2}, + []any{1, 2, 3}, + nil, + }, + { + "shorter second array", + []any{1, 2, 3}, + []any{1, 2}, + nil, + }, + { + "sorted 
arrays", + []any{3, 1, 2}, + []any{1, 2, 3}, + &Options{SortArrays: true}, + }, + { + "empty arrays", + []any{}, + []any{}, + nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := DiffJSON(tt.obj1, tt.obj2, tt.opts) + if result == nil { + t.Error("expected non-nil result") + } + }) + } +} + +func TestDiffJSON_NonStringKeyMaps(t *testing.T) { + // Test with non-map[string]any types + obj1 := map[string]any{"a": 1} + obj2 := "not a map" + + result := DiffJSON(obj1, obj2, nil) + if len(result) == 0 { + t.Error("expected diff for different types") + } +} + +func TestDiffString(t *testing.T) { + tests := []struct { + name string + obj1 any + obj2 any + opts *Options + contains []string + }{ + { + "added value", + map[string]any{}, + map[string]any{"a": 1}, + nil, + []string{"+", "a"}, + }, + { + "deleted value", + map[string]any{"a": 1}, + map[string]any{}, + nil, + []string{"-", "a"}, + }, + { + "updated value", + map[string]any{"a": 1}, + map[string]any{"a": 2}, + nil, + []string{"~", "a", "->"}, + }, + { + "full output with equal", + map[string]any{"a": 1}, + map[string]any{"a": 1}, + &Options{Full: true}, + []string{"a"}, + }, + { + "nil options", + map[string]any{"a": 1}, + map[string]any{"a": 2}, + nil, + []string{"~"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := DiffString(tt.obj1, tt.obj2, tt.opts) + for _, substr := range tt.contains { + if !strings.Contains(result, substr) { + t.Errorf("expected result to contain %q, got: %s", substr, result) + } + } + }) + } +} + +func TestColoredString(t *testing.T) { + tests := []struct { + name string + obj1 any + obj2 any + opts *Options + contains []string + }{ + { + "added value green", + map[string]any{}, + map[string]any{"a": 1}, + nil, + []string{"\033[32m", "+"}, + }, + { + "deleted value red", + map[string]any{"a": 1}, + map[string]any{}, + nil, + []string{"\033[31m", "-"}, + }, + { + "updated value yellow", + map[string]any{"a": 
1}, + map[string]any{"a": 2}, + nil, + []string{"\033[33m", "~"}, + }, + { + "full output with equal", + map[string]any{"a": 1}, + map[string]any{"a": 1}, + &Options{Full: true}, + []string{"a"}, + }, + { + "nil options", + map[string]any{"a": 1}, + map[string]any{"a": 2}, + nil, + []string{"\033[0m"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ColoredString(tt.obj1, tt.obj2, tt.opts) + for _, substr := range tt.contains { + if !strings.Contains(result, substr) { + t.Errorf("expected result to contain %q, got: %s", substr, result) + } + } + }) + } +} + +func TestCollectDiffs(t *testing.T) { + tests := []struct { + name string + obj1 any + obj2 any + expectedType DiffType + }{ + {"both nil", nil, nil, DiffEqual}, + {"first nil", nil, "value", DiffAdd}, + {"second nil", "value", nil, DiffDelete}, + {"different types", "string", 42, DiffUpdate}, + {"equal primitives", "hello", "hello", DiffEqual}, + {"different primitives", "hello", "world", DiffUpdate}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + diffs := collectDiffs(tt.obj1, tt.obj2, "") + if len(diffs) == 0 { + t.Error("expected at least one diff") + return + } + if diffs[0].Type != tt.expectedType { + t.Errorf("expected type %v, got %v", tt.expectedType, diffs[0].Type) + } + }) + } +} + +func TestCollectMapDiffs(t *testing.T) { + tests := []struct { + name string + obj1 any + obj2 any + }{ + { + "non-map types", + "not a map", + "also not a map", + }, + { + "nested maps", + map[string]any{"a": map[string]any{"b": 1}}, + map[string]any{"a": map[string]any{"b": 2}}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + diffs := collectDiffs(tt.obj1, tt.obj2, "") + if diffs == nil { + t.Error("expected non-nil diffs") + } + }) + } +} + +func TestCollectArrayDiffs(t *testing.T) { + tests := []struct { + name string + obj1 any + obj2 any + }{ + { + "equal arrays", + []any{1, 2, 3}, + []any{1, 2, 3}, + }, + { + 
"first longer", + []any{1, 2, 3}, + []any{1, 2}, + }, + { + "second longer", + []any{1, 2}, + []any{1, 2, 3}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + diffs := collectDiffs(tt.obj1, tt.obj2, "") + if diffs == nil { + t.Error("expected non-nil diffs") + } + }) + } +} + +func TestSortArrayIfPrimitive(t *testing.T) { + tests := []struct { + name string + input any + expected any + }{ + {"non-slice", "string", "string"}, + {"empty slice", []any{}, []any{}}, + {"primitive ints", []any{3, 1, 2}, []any{1, 2, 3}}, + {"primitive strings", []any{"c", "a", "b"}, []any{"a", "b", "c"}}, + {"non-primitive", []any{map[string]any{"a": 1}}, []any{map[string]any{"a": 1}}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := sortArrayIfPrimitive(tt.input) + if result == nil { + t.Error("expected non-nil result") + } + }) + } +} + +func TestIsPrimitive(t *testing.T) { + tests := []struct { + name string + input any + expected bool + }{ + {"nil", nil, true}, + {"bool", true, true}, + {"string", "hello", true}, + {"int", 42, true}, + {"int8", int8(42), true}, + {"int16", int16(42), true}, + {"int32", int32(42), true}, + {"int64", int64(42), true}, + {"uint", uint(42), true}, + {"uint8", uint8(42), true}, + {"uint16", uint16(42), true}, + {"uint32", uint32(42), true}, + {"uint64", uint64(42), true}, + {"float32", float32(3.14), true}, + {"float64", 3.14, true}, + {"map", map[string]any{}, false}, + {"slice", []any{}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := isPrimitive(tt.input) + if result != tt.expected { + t.Errorf("expected %v, got %v", tt.expected, result) + } + }) + } +} + +func TestComparePrimitives(t *testing.T) { + tests := []struct { + name string + a any + b any + expected int + }{ + {"both nil", nil, nil, 0}, + {"first nil", nil, "a", -1}, + {"second nil", "a", nil, 1}, + {"different types", "a", 1, 1}, // string > int by type name + {"equal bools true", 
true, true, 0}, + {"equal bools false", false, false, 0}, + {"true > false", true, false, 1}, + {"false < true", false, true, -1}, + {"equal strings", "hello", "hello", 0}, + {"string less", "a", "b", -1}, + {"string greater", "b", "a", 1}, + {"equal ints", 42, 42, 0}, + {"int less", 1, 2, -1}, + {"int greater", 2, 1, 1}, + {"equal int64", int64(42), int64(42), 0}, + {"int64 less", int64(1), int64(2), -1}, + {"int64 greater", int64(2), int64(1), 1}, + {"equal float64", 3.14, 3.14, 0}, + {"float64 less", 1.0, 2.0, -1}, + {"float64 greater", 2.0, 1.0, 1}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := comparePrimitives(tt.a, tt.b) + if result != tt.expected { + t.Errorf("expected %v, got %v", tt.expected, result) + } + }) + } +} + +func TestComparePrimitives_Fallback(t *testing.T) { + // Test fallback case with unknown type + type customType struct { + value int + } + a := customType{value: 1} + b := customType{value: 2} + + result := comparePrimitives(a, b) + // Should use string comparison fallback + if result == 0 { + t.Error("expected non-zero result for different values") + } +} + +func TestFormatValue(t *testing.T) { + tests := []struct { + name string + input any + expected string + }{ + {"nil", nil, "null"}, + {"string", "hello", `"hello"`}, + {"number", 42, "42"}, + {"float", 3.14, "3.14"}, + {"bool", true, "true"}, + {"map", map[string]any{"a": 1}, `{"a":1}`}, + {"slice", []any{1, 2, 3}, "[1,2,3]"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := formatValue(tt.input) + if result != tt.expected { + t.Errorf("expected %q, got %q", tt.expected, result) + } + }) + } +} + +func TestDiffJSON_ComplexNested(t *testing.T) { + obj1 := map[string]any{ + "users": []any{ + map[string]any{ + "name": "Alice", + "age": 30, + }, + map[string]any{ + "name": "Bob", + "age": 25, + }, + }, + "metadata": map[string]any{ + "version": "1.0", + "count": 2, + }, + } + + obj2 := map[string]any{ + "users": 
[]any{ + map[string]any{ + "name": "Alice", + "age": 31, // changed + }, + map[string]any{ + "name": "Bob", + "age": 25, + }, + map[string]any{ + "name": "Charlie", // added + "age": 35, + }, + }, + "metadata": map[string]any{ + "version": "1.1", // changed + "count": 3, // changed + }, + } + + result := DiffJSON(obj1, obj2, &Options{Sort: true}) + if len(result) == 0 { + t.Error("expected diffs for nested changes") + } +} + +func TestDiffJSON_WithJSONUnmarshal(t *testing.T) { + json1 := `{"name": "test", "value": 42}` + json2 := `{"name": "test", "value": 43, "extra": true}` + + var obj1, obj2 map[string]any + if err := json.Unmarshal([]byte(json1), &obj1); err != nil { + t.Fatalf("failed to unmarshal json1: %v", err) + } + if err := json.Unmarshal([]byte(json2), &obj2); err != nil { + t.Fatalf("failed to unmarshal json2: %v", err) + } + + result := DiffJSON(obj1, obj2, nil) + if len(result) == 0 { + t.Error("expected diffs") + } +} + +func TestDiffTypes(t *testing.T) { + // Ensure all DiffType constants are defined + types := []DiffType{DiffAdd, DiffDelete, DiffUpdate, DiffEqual} + expectedValues := []string{"add", "delete", "update", "equal"} + + for i, dt := range types { + if string(dt) != expectedValues[i] { + t.Errorf("expected %q, got %q", expectedValues[i], string(dt)) + } + } +} + +func TestDiffStruct(t *testing.T) { + // Test the Diff struct fields + d := Diff{ + Type: DiffUpdate, + Path: "test.path", + OldValue: 1, + NewValue: 2, + } + + if d.Type != DiffUpdate { + t.Errorf("expected DiffUpdate, got %v", d.Type) + } + if d.Path != "test.path" { + t.Errorf("expected test.path, got %v", d.Path) + } + if d.OldValue != 1 { + t.Errorf("expected 1, got %v", d.OldValue) + } + if d.NewValue != 2 { + t.Errorf("expected 2, got %v", d.NewValue) + } +} + +func TestOptions(t *testing.T) { + // Test the Options struct fields + opts := Options{ + Full: true, + KeepUnchangedValues: true, + OutputKeys: []string{"a", "b"}, + Sort: true, + SortArrays: true, + } + + if 
!opts.Full { + t.Error("expected Full to be true") + } + if !opts.KeepUnchangedValues { + t.Error("expected KeepUnchangedValues to be true") + } + if len(opts.OutputKeys) != 2 { + t.Errorf("expected 2 output keys, got %d", len(opts.OutputKeys)) + } + if !opts.Sort { + t.Error("expected Sort to be true") + } + if !opts.SortArrays { + t.Error("expected SortArrays to be true") + } +} + +func TestDiffMaps_NonStringKeyMap(t *testing.T) { + // Test diffMaps with invalid map types + result := make(map[string]any) + diffMaps("not a map", "also not a map", "", result, &Options{}) + if len(result) == 0 { + t.Error("expected result for non-map types") + } +} + +func TestDiffArrays_SortArraysOption(t *testing.T) { + obj1 := []any{3, 1, 2} + obj2 := []any{1, 2, 3} + + result := DiffJSON(obj1, obj2, &Options{SortArrays: true}) + // After sorting, arrays should be equal + if len(result) != 0 { + t.Errorf("expected no diff for sorted arrays, got result: %v", result) + } +} + +func TestCollectDiffs_Path(t *testing.T) { + obj1 := map[string]any{ + "level1": map[string]any{ + "level2": "value1", + }, + } + obj2 := map[string]any{ + "level1": map[string]any{ + "level2": "value2", + }, + } + + diffs := collectDiffs(obj1, obj2, "") + found := false + for _, d := range diffs { + if d.Path == "level1.level2" && d.Type == DiffUpdate { + found = true + break + } + } + if !found { + t.Error("expected to find diff at level1.level2") + } +} + +func TestSortArrayIfPrimitive_MixedPrimitives(t *testing.T) { + // Test sorting with mixed primitive types + input := []any{"b", "a", "c"} + result := sortArrayIfPrimitive(input) + + resultSlice, ok := result.([]any) + if !ok { + t.Fatal("expected slice result") + } + + if resultSlice[0] != "a" || resultSlice[1] != "b" || resultSlice[2] != "c" { + t.Errorf("expected sorted slice [a, b, c], got %v", resultSlice) + } +} + +func TestDiffJSON_ArrayInMap(t *testing.T) { + obj1 := map[string]any{ + "items": []any{"a", "b"}, + } + obj2 := map[string]any{ + 
"items": []any{"a", "b", "c"}, + } + + result := DiffJSON(obj1, obj2, nil) + if len(result) == 0 { + t.Error("expected diff for array change in map") + } + + // Check that the result contains the expected added value + found := false + for path, val := range result { + if path == "items[2]" { + if diffMap, ok := val.(map[string]any); ok { + if diffMap["__new"] == "c" { + found = true + break + } + } + } + } + if !found { + t.Errorf("expected to find added item 'c' at items[2], got: %v", result) + } +} + +func TestDiffJSON_EmptyMap(t *testing.T) { + obj1 := map[string]any{} + obj2 := map[string]any{} + + result := DiffJSON(obj1, obj2, nil) + if len(result) != 0 { + t.Errorf("expected no diffs for equal empty maps, got %v", result) + } +} + +func TestDiffString_NilBoth(t *testing.T) { + result := DiffString(nil, nil, nil) + // Both nil should show as equal + if result != "" { + t.Errorf("expected no diffs for both nil, got %v", result) + } +} + +func TestColoredString_NilBoth(t *testing.T) { + result := ColoredString(nil, nil, nil) + // Both nil should show as equal + if result != "" { + t.Errorf("expected no diffs for both nil, got %v", result) + } +} From 9cf86417858b61e70150d018e5f782056ed31c56 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Mon, 12 Jan 2026 21:50:15 +0100 Subject: [PATCH 57/87] integration: refactor archive module --- cmd/integration/archive/archive.go | 143 ++++++ cmd/integration/archive/archive_test.go | 559 ++++++++++++++++++++++++ cmd/integration/main.go | 163 ++----- 3 files changed, 729 insertions(+), 136 deletions(-) create mode 100644 cmd/integration/archive/archive.go create mode 100644 cmd/integration/archive/archive_test.go diff --git a/cmd/integration/archive/archive.go b/cmd/integration/archive/archive.go new file mode 100644 index 00000000..5d87289f --- /dev/null +++ b/cmd/integration/archive/archive.go @@ -0,0 +1,143 @@ +package archive + +import ( + "archive/tar" + "compress/bzip2" + 
"compress/gzip" + "errors" + "fmt" + "io" + "os" + "strings" +) + +// Compression defines the supported compression types +type Compression int + +const ( + GzipCompression Compression = iota + Bzip2Compression + NoCompression +) + +func (c Compression) String() string { + return [...]string{"gzip", "bzip2", "none"}[c] +} + +func (c Compression) Extension() string { + return [...]string{".gz", ".bz2", ""}[c] +} + +// getCompressionKind determines the compression from the filename extension. +func getCompressionKind(filename string) Compression { + if strings.HasSuffix(filename, ".tar.gz") || strings.HasSuffix(filename, ".tgz") { + return GzipCompression + } + if strings.HasSuffix(filename, ".tar.bz2") || strings.HasSuffix(filename, ".tbz") { + return Bzip2Compression + } + return NoCompression +} + +// autodetectCompression attempts to detect the compression type of the input file +func autodetectCompression(inFile *os.File) (Compression, error) { + compressionType := NoCompression + tarReader := tar.NewReader(inFile) + _, err := tarReader.Next() + if err != nil && !errors.Is(err, io.EOF) { + // Reset the file position and check if it's gzip encoded + _, err = inFile.Seek(0, io.SeekStart) + if err != nil { + return compressionType, err + } + _, err = gzip.NewReader(inFile) + if err == nil { + compressionType = GzipCompression + } else { + // Reset the file position and check if it's bzip2 encoded + _, err = inFile.Seek(0, io.SeekStart) + if err != nil { + return compressionType, err + } + _, err = tar.NewReader(bzip2.NewReader(inFile)).Next() + if err == nil { + compressionType = Bzip2Compression + } + } + } + return compressionType, nil +} + +// ExtractAndApply extracts a compressed or uncompressed tar archive and applies the given function to it. 
+func ExtractAndApply(archivePath string, sanitizeExtension bool, f func(*tar.Reader) error) error { + inputFile, err := os.Open(archivePath) + if err != nil { + return fmt.Errorf("failed to open archive: %w", err) + } + defer func(inputFile *os.File) { + err = inputFile.Close() + if err != nil { + fmt.Printf("Warning: failed to close input file: %v", err) + } + }(inputFile) + + // If the archive appears to be uncompressed, try to autodetect any compression type + compressionKind := getCompressionKind(archivePath) + if compressionKind == NoCompression { + compressionKind, err = autodetectCompression(inputFile) + if err != nil { + return fmt.Errorf("failed to autodetect compression for archive: %w", err) + } + // Check if we are required to sanitise the extension for compressed archives + if compressionKind != NoCompression && sanitizeExtension { + err = os.Rename(archivePath, archivePath+compressionKind.Extension()) + if err != nil { + return err + } + archivePath = archivePath + compressionKind.Extension() + } + // Reopening the file is necessary to reset the position and also because of potential renaming + inputFile, err = os.Open(archivePath) + if err != nil { + return err + } + } + + var reader io.Reader + switch compressionKind { + case GzipCompression: + gzReader, err := gzip.NewReader(inputFile) + if err != nil { + return fmt.Errorf("failed to create gzip reader: %w", err) + } + defer func(gzReader *gzip.Reader) { + err = gzReader.Close() + if err != nil { + fmt.Printf("Warning: failed to close gzip reader: %v", err) + } + }(gzReader) + reader = gzReader + case Bzip2Compression: + reader = bzip2.NewReader(inputFile) + case NoCompression: + reader = inputFile + } + + tarReader := tar.NewReader(reader) + header, err := tarReader.Next() + if err == io.EOF { + return fmt.Errorf("archive is empty") + } + if err != nil { + return fmt.Errorf("failed to read tar header: %w", err) + } + if header.Typeflag != tar.TypeReg { + return fmt.Errorf("expected regular file 
in archive, got type %v", header.Typeflag) + } + + if err = f(tarReader); err != nil { + return err + } + + return nil +} diff --git a/cmd/integration/archive/archive_test.go b/cmd/integration/archive/archive_test.go new file mode 100644 index 00000000..6cb026e7 --- /dev/null +++ b/cmd/integration/archive/archive_test.go @@ -0,0 +1,559 @@ +package archive + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "errors" + "io" + "os" + "path/filepath" + "testing" + + "github.com/dsnet/compress/bzip2" +) + +// Helper functions + +func closeFile(t *testing.T, file *os.File) { + t.Helper() + + err := file.Close() + if err != nil { + t.Fatalf("failed to close file %s: %v", file.Name(), err) + } +} + +func createTempTarFile(t *testing.T, content string, compression Compression) string { + t.Helper() + + tmpFile, err := os.CreateTemp("", "test_*.tar") + if err != nil { + t.Fatalf("failed to create temp file: %v", err) + } + + var writer io.Writer = tmpFile + var gzWriter *gzip.Writer + var bzWriter *bzip2.Writer + + if compression == GzipCompression { + gzWriter = gzip.NewWriter(tmpFile) + writer = gzWriter + } else if compression == Bzip2Compression { + bzWriter, err = bzip2.NewWriter(tmpFile, &bzip2.WriterConfig{Level: bzip2.BestCompression}) + if err != nil { + t.Fatalf("failed to create bzip2 writer: %v", err) + } + writer = bzWriter + } + + tarWriter := tar.NewWriter(writer) + + contentBytes := []byte(content) + header := &tar.Header{ + Name: "test.json", + Size: int64(len(contentBytes)), + Mode: 0644, + } + + if err := tarWriter.WriteHeader(header); err != nil { + t.Fatalf("failed to write tar header: %v", err) + } + if _, err := tarWriter.Write(contentBytes); err != nil { + t.Fatalf("failed to write tar content: %v", err) + } + + err = tarWriter.Close() + if err != nil { + t.Fatalf("failed to close tar writer: %v", err) + } + if gzWriter != nil { + err = gzWriter.Close() + if err != nil { + t.Fatalf("failed to close gzip writer: %v", err) + } + } + if bzWriter != 
nil { + err = bzWriter.Close() + if err != nil { + t.Fatalf("failed to close bzip2 writer: %v", err) + } + } + err = tmpFile.Close() + if err != nil { + t.Fatalf("failed to close temp file: %v", err) + } + + return tmpFile.Name() +} + +func createTempTarWithJSON(t *testing.T, compression Compression) string { + t.Helper() + + jsonContent := `[{"request":"dGVzdA==","response":{"result":"ok"},"result":"ok"}]` + return createTempTarFile(t, jsonContent, compression) +} + +func createEmptyTarFile(t *testing.T) string { + t.Helper() + + tmpFile, err := os.CreateTemp("", "empty_*.tar") + if err != nil { + t.Fatalf("failed to create temp file: %v", err) + } + + return tmpFile.Name() +} + +func createTempTarWithDirectory(t *testing.T) string { + t.Helper() + + tmpDir := t.TempDir() + tmpFile := filepath.Join(tmpDir, "dir.tar") + + file, err := os.Create(tmpFile) + if err != nil { + t.Fatalf("failed to create temp file: %v", err) + } + + tarWriter := tar.NewWriter(file) + + header := &tar.Header{ + Name: "testdir/", + Typeflag: tar.TypeDir, + Mode: 0755, + } + + if err := tarWriter.WriteHeader(header); err != nil { + t.Fatalf("failed to write tar header: %v", err) + } + + err = tarWriter.Close() + if err != nil { + return "" + } + defer closeFile(t, file) + + return tmpFile +} + +func removeTempFile(t *testing.T, path string) { + err := os.Remove(path) + if err != nil && !os.IsNotExist(err) { + t.Fatalf("failed to remove temp file: %v", err) + } +} + +func TestGetCompressionType(t *testing.T) { + tests := []struct { + name string + filename string + expected Compression + }{ + {"tar.gz extension", "file.tar.gz", GzipCompression}, + {"tgz extension", "file.tgz", GzipCompression}, + {"tar.bz2 extension", "file.tar.bz2", Bzip2Compression}, + {"tbz extension", "file.tbz", Bzip2Compression}, + {"tar extension", "file.tar", NoCompression}, + {"json extension", "file.json", NoCompression}, + {"no extension", "file", NoCompression}, + {"path with tar.gz", "/path/to/file.tar.gz", 
GzipCompression}, + {"path with tgz", "/path/to/file.tgz", GzipCompression}, + {"path with tar.bz2", "/path/to/file.tar.bz2", Bzip2Compression}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getCompressionKind(tt.filename) + if result != tt.expected { + t.Errorf("expected %q, got %q", tt.expected, result) + } + }) + } +} + +func TestAutodetectCompression_UncompressedTar(t *testing.T) { + tmpFilePath := createTempTarWithJSON(t, NoCompression) + defer removeTempFile(t, tmpFilePath) + + file, err := os.Open(tmpFilePath) + if err != nil { + t.Fatalf("failed to open temp file: %v", err) + } + defer closeFile(t, file) + + compressionType, err := autodetectCompression(file) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if compressionType != NoCompression { + t.Errorf("expected NoCompression, got %q", compressionType) + } +} + +func TestAutodetectCompression_GzipTar(t *testing.T) { + tmpFile := createTempTarWithJSON(t, GzipCompression) + defer removeTempFile(t, tmpFile) + + file, err := os.Open(tmpFile) + if err != nil { + t.Fatalf("failed to open temp file: %v", err) + } + defer closeFile(t, file) + + compressionKind, err := autodetectCompression(file) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if compressionKind != GzipCompression { + t.Errorf("expected GzipCompression, got %q", compressionKind) + } +} + +func TestAutodetectCompression_InvalidFile(t *testing.T) { + tmpFile, err := os.CreateTemp("", "invalid_*.dat") + if err != nil { + t.Fatalf("failed to create temp file: %v", err) + } + defer removeTempFile(t, tmpFile.Name()) + + _, err = tmpFile.Write([]byte("this is not a valid archive")) + if err != nil { + t.Fatalf("failed to write to temp file: %v", err) + } + defer closeFile(t, tmpFile) + + file, err := os.Open(tmpFile.Name()) + if err != nil { + t.Fatalf("failed to open temp file: %v", err) + } + defer closeFile(t, file) + + compressionType, err := autodetectCompression(file) + if err 
!= nil { + t.Fatalf("unexpected error: %v", err) + } + // Invalid data should return NoCompression + if compressionType != NoCompression { + t.Errorf("expected NoCompression for invalid file, got %q", compressionType) + } +} + +var nullTarFunc = func(*tar.Reader) error { return nil } + +func TestExtractAndApply_NonExistentFile(t *testing.T) { + err := ExtractAndApply("/nonexistent/path/file.tar", false, nullTarFunc) + if err == nil { + t.Error("expected error for non-existent file") + } +} + +func TestExtractAndApply_UncompressedTar(t *testing.T) { + tmpFilePath := createTempTarWithJSON(t, NoCompression) + defer removeTempFile(t, tmpFilePath) + + err := ExtractAndApply(tmpFilePath, false, nullTarFunc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestExtractAndApply_GzipTar(t *testing.T) { + tmpFile := createTempTarWithJSON(t, GzipCompression) + defer removeTempFile(t, tmpFile) + + // Rename it to change its extension + newPath := tmpFile + ".tar.gz" + if err := os.Rename(tmpFile, newPath); err != nil { + t.Fatalf("failed to rename file: %v", err) + } + defer removeTempFile(t, newPath) + + err := ExtractAndApply(newPath, false, nullTarFunc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestExtractAndApply_NilMetrics(t *testing.T) { + tmpFile := createTempTarWithJSON(t, NoCompression) + defer removeTempFile(t, tmpFile) + + err := ExtractAndApply(tmpFile, false, nullTarFunc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestExtractAndApply_EmptyArchive(t *testing.T) { + tmpFile := createEmptyTarFile(t) + defer removeTempFile(t, tmpFile) + + // Empty archive should return error since Next() is called internally + err := ExtractAndApply(tmpFile, false, nullTarFunc) + if err == nil { + t.Error("expected error for empty archive") + } +} + +func TestExtractAndApply_InvalidJSON(t *testing.T) { + tmpFile := createTempTarFile(t, "invalid json content", NoCompression) + defer removeTempFile(t, 
tmpFile) + + err := ExtractAndApply(tmpFile, false, nullTarFunc) + if err != nil { + t.Fatalf("unexpected error from ExtractAndApply: %v", err) + } +} + +func TestExtractAndApply_SanitizeExtension(t *testing.T) { + tmpFile := createTempTarWithJSON(t, GzipCompression) + defer removeTempFile(t, tmpFile) + defer removeTempFile(t, tmpFile+".gz") + + err := ExtractAndApply(tmpFile, true, nullTarFunc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Check that the original file was renamed + if _, err := os.Stat(tmpFile); !os.IsNotExist(err) { + t.Error("expected file to not exist anymore after extraction") + } + if _, err := os.Stat(tmpFile + ".gz"); os.IsNotExist(err) { + t.Error("expected file to be renamed with .gz extension") + } +} + +func TestExtractAndApply_DirectoryInArchive(t *testing.T) { + tmpFile := createTempTarWithDirectory(t) + defer removeTempFile(t, tmpFile) + + err := ExtractAndApply(tmpFile, false, nullTarFunc) + if err == nil { + t.Error("expected error for directory in archive as unsupported") + } +} + +func TestExtractAndApply_TgzExtension(t *testing.T) { + tmpFile := createTempTarWithJSON(t, GzipCompression) + + // Rename to .tgz + tgzPath := tmpFile[:len(tmpFile)-4] + ".tgz" + if err := os.Rename(tmpFile, tgzPath); err != nil { + t.Fatalf("failed to rename: %v", err) + } + defer removeTempFile(t, tgzPath) + + err := ExtractAndApply(tgzPath, false, nullTarFunc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestExtractAndApply_AutodetectGzip(t *testing.T) { + // Create gzip tar but with .tar extension (no compression hint) + tmpFile := createTempTarWithJSON(t, GzipCompression) + defer removeTempFile(t, tmpFile) + defer removeTempFile(t, tmpFile+".gz") + + err := ExtractAndApply(tmpFile, false, nullTarFunc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestExtractAndApply_Bzip2Tar(t *testing.T) { + tmpFile := createTempTarWithJSON(t, Bzip2Compression) + defer removeTempFile(t, 
tmpFile) + + var callbackInvoked bool + err := ExtractAndApply(tmpFile, false, func(tr *tar.Reader) error { + callbackInvoked = true + // Verify we can read from the tar - Next() already called, second should be EOF + _, err := tr.Next() + if err != io.EOF { + t.Errorf("expected io.EOF for second Next() call, got: %v", err) + } + return nil + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !callbackInvoked { + t.Error("expected callback to be invoked") + } +} + +func TestGetCompressionType_EdgeCases(t *testing.T) { + tests := []struct { + name string + filename string + expected string + }{ + {"empty string", "", NoCompression.Extension()}, + {"just .gz", ".gz", NoCompression.Extension()}, + {"just .tgz", ".tgz", GzipCompression.Extension()}, + {"double extension tar.gz.gz", "file.tar.gz.gz", NoCompression.Extension()}, + {"case sensitive TAR.GZ", "file.TAR.GZ", NoCompression.Extension()}, + {"mixed case TaR.gZ", "file.TaR.gZ", NoCompression.Extension()}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getCompressionKind(tt.filename) + if result.Extension() != tt.expected { + t.Errorf("expected %q, got %q", tt.expected, result) + } + }) + } +} + +func TestExtractAndApply_CorruptedGzip(t *testing.T) { + tmpDir := t.TempDir() + tmpFile := filepath.Join(tmpDir, "corrupted.tar.gz") + + // Write corrupted gzip data + file, err := os.Create(tmpFile) + if err != nil { + t.Fatalf("failed to create file: %v", err) + } + // Gzip magic number but corrupted content + _, err = file.Write([]byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00}) + if err != nil { + t.Fatalf("failed to write to file %s: %v", tmpFile, err) + } + err = file.Close() + if err != nil { + t.Fatalf("failed to close file %s: %v", tmpFile, err) + } + + err = ExtractAndApply(tmpFile, false, nullTarFunc) + if err == nil { + t.Error("expected error for corrupted gzip") + } +} + +func BenchmarkGetCompressionType(b *testing.B) { + filenames := []string{ 
+ "file.tar.gz", + "file.tgz", + "file.tar.bz2", + "file.tbz", + "file.tar", + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, f := range filenames { + getCompressionKind(f) + } + } +} + +func BenchmarkExtractArchive(b *testing.B) { + tmpDir := b.TempDir() + tmpFile := filepath.Join(tmpDir, "bench.tar") + + jsonContent := `[{"request":"dGVzdA==","response":{"result":"ok"},"result":"ok"}]` + + file, _ := os.Create(tmpFile) + tarWriter := tar.NewWriter(file) + contentBytes := []byte(jsonContent) + header := &tar.Header{ + Name: "test.json", + Size: int64(len(contentBytes)), + Mode: 0644, + } + err := tarWriter.WriteHeader(header) + if err != nil { + b.Fatalf("unexpected error writing header for %s: %v", tmpFile, err) + } + _, err = tarWriter.Write(contentBytes) + if err != nil { + b.Fatalf("unexpected error writing content for %s: %v", tmpFile, err) + } + err = tarWriter.Close() + if err != nil { + b.Fatalf("unexpected error closing tar writer for %s: %v", tmpFile, err) + } + err = file.Close() + if err != nil { + b.Fatalf("unexpected error closing file for %s: %v", tmpFile, err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := ExtractAndApply(tmpFile, false, nullTarFunc) + if err != nil { + b.Fatalf("unexpected error: %v", err) + } + } +} + +func TestExtractAndApply_LargeJSON(t *testing.T) { + // Create a large JSON payload + var buf bytes.Buffer + buf.WriteString("[") + for i := 0; i < 100_000; i++ { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString(`{"request":"dGVzdA==","response":{"result":"ok"},"result":"ok"}`) + } + buf.WriteString("]") + + tmpFile := createTempTarFile(t, buf.String(), NoCompression) + defer removeTempFile(t, tmpFile) + + err := ExtractAndApply(tmpFile, false, nullTarFunc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestExtractAndApply_CallbackError(t *testing.T) { + tmpFile := createTempTarWithJSON(t, NoCompression) + defer removeTempFile(t, tmpFile) + + expectedErr := 
io.ErrUnexpectedEOF + err := ExtractAndApply(tmpFile, false, func(tr *tar.Reader) error { + return expectedErr + }) + if !errors.Is(err, expectedErr) { + t.Errorf("expected callback error to propagate, got: %v", err) + } +} + +func TestExtractAndApply_CallbackReadsContent(t *testing.T) { + expectedContent := `{"test":"value"}` + tmpFile := createTempTarFile(t, expectedContent, NoCompression) + defer removeTempFile(t, tmpFile) + + var readContent []byte + err := ExtractAndApply(tmpFile, false, func(tr *tar.Reader) error { + var err error + readContent, err = io.ReadAll(tr) + return err + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if string(readContent) != expectedContent { + t.Errorf("expected content %q, got %q", expectedContent, string(readContent)) + } +} + +func TestExtractAndApply_NonExistentFileCallbackNotCalled(t *testing.T) { + callbackCalled := false + err := ExtractAndApply("/nonexistent/path/file.tar", false, func(tr *tar.Reader) error { + callbackCalled = true + return nil + }) + if err == nil { + t.Error("expected error for non-existent file") + } + if callbackCalled { + t.Error("callback should not be called for non-existent file") + } +} diff --git a/cmd/integration/main.go b/cmd/integration/main.go index a1078541..1660ef6c 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -4,8 +4,6 @@ import ( "archive/tar" "bufio" "bytes" - "compress/bzip2" - "compress/gzip" "context" "crypto/rand" "encoding/hex" @@ -30,11 +28,13 @@ import ( "syscall" "time" - "github.com/erigontech/rpc-tests/cmd/integration/jsondiff" "github.com/golang-jwt/jwt/v5" "github.com/gorilla/websocket" "github.com/josephburnett/jd/v2" jsoniter "github.com/json-iterator/go" + + "github.com/erigontech/rpc-tests/cmd/integration/archive" + "github.com/erigontech/rpc-tests/cmd/integration/jsondiff" ) const ( @@ -176,128 +176,6 @@ var ( } ) -// Supported compression types -const ( - GzipCompression = ".gz" - Bzip2Compression = ".bz2" - NoCompression = 
"" -) - -// --- Helper Functions --- - -// getCompressionType determines the compression from the filename extension. -func getCompressionType(filename string) string { - if strings.HasSuffix(filename, ".tar.gz") || strings.HasSuffix(filename, ".tgz") { - return GzipCompression - } - if strings.HasSuffix(filename, ".tar.bz2") || strings.HasSuffix(filename, ".tbz") { - return Bzip2Compression - } - return NoCompression -} - -func autodetectCompression(inFile *os.File) (string, error) { - // Assume we have no compression and try to detect it if the tar header is invalid - compressionType := NoCompression - tarReader := tar.NewReader(inFile) - _, err := tarReader.Next() - if err != nil && !errors.Is(err, io.EOF) { - // Reset the file position for read and check if it's gzip encoded - _, err = inFile.Seek(0, io.SeekStart) - if err != nil { - return compressionType, err - } - _, err = gzip.NewReader(inFile) - if err == nil { - compressionType = GzipCompression - } else { - // Reset the file position for read and check if it's gzip encoded - _, err = inFile.Seek(0, io.SeekStart) - if err != nil { - return compressionType, err - } - _, err = tar.NewReader(bzip2.NewReader(inFile)).Next() - if err == nil { - compressionType = Bzip2Compression - } - } - } - return compressionType, nil -} - -// extractArchive extracts a compressed or uncompressed tar archive. 
-func extractArchive(archivePath string, sanitizeExtension bool, metrics *TestMetrics) ([]JsonRpcCommand, error) { - // Open the archive file - inputFile, err := os.Open(archivePath) - if err != nil { - return nil, fmt.Errorf("failed to open archive: %w", err) - } - defer func(inFile *os.File) { - _ = inFile.Close() - }(inputFile) - - // Wrap the input file with the correct compression reader - compressionType := getCompressionType(archivePath) - if compressionType == NoCompression { - // Possibly handle the corner case where the file is compressed but has tar extension - compressionType, err = autodetectCompression(inputFile) - if err != nil { - return nil, fmt.Errorf("failed to autodetect compression for archive: %w", err) - } - if compressionType != NoCompression { - // If any compression was detected, optionally rename and reopen the archive file - if sanitizeExtension { - err = os.Rename(archivePath, archivePath+compressionType) - if err != nil { - return nil, err - } - archivePath = archivePath + compressionType - } - } - inputFile, err = os.Open(archivePath) - if err != nil { - return nil, err - } - } - - var reader io.Reader - switch compressionType { - case GzipCompression: - if reader, err = gzip.NewReader(inputFile); err != nil { - return nil, fmt.Errorf("failed to create gzip reader: %w", err) - } - case Bzip2Compression: - reader = bzip2.NewReader(inputFile) - case NoCompression: - reader = inputFile - } - - var jsonrpcCommands []JsonRpcCommand - - // We expect the archive to contain a single JSON file - tarReader := tar.NewReader(reader) - header, err := tarReader.Next() - if err == io.EOF { - return jsonrpcCommands, nil // Empty archive - } - if err != nil { - return nil, fmt.Errorf("failed to read tar header: %w", err) - } - if header.Typeflag != tar.TypeReg { - return nil, fmt.Errorf("archive must contain a single JSON file, found %s", header.Name) - } - - bufferedReader := bufio.NewReaderSize(tarReader, 8*os.Getpagesize()) - - start := time.Now() 
- if err := jsoniter.NewDecoder(bufferedReader).Decode(&jsonrpcCommands); err != nil { - return jsonrpcCommands, errors.New("cannot parse JSON " + archivePath + ": " + err.Error()) - } - metrics.UnmarshallingTime += time.Since(start) - - return jsonrpcCommands, nil -} - type JsonDiffKind int const ( @@ -1340,7 +1218,24 @@ func isArchive(jsonFilename string) bool { return !strings.HasSuffix(jsonFilename, ".json") } -func extractJsonCommands(jsonFilename string, metrics *TestMetrics) ([]JsonRpcCommand, error) { +func extractJsonCommands(jsonFilename string, sanitizeExtension bool, metrics *TestMetrics) ([]JsonRpcCommand, error) { + var jsonrpcCommands []JsonRpcCommand + err := archive.ExtractAndApply(jsonFilename, sanitizeExtension, func(reader *tar.Reader) error { + bufferedReader := bufio.NewReaderSize(reader, 8*os.Getpagesize()) + start := time.Now() + if err := jsoniter.NewDecoder(bufferedReader).Decode(&jsonrpcCommands); err != nil { + return fmt.Errorf("failed to decode JSON: %w", err) + } + metrics.UnmarshallingTime += time.Since(start) + return nil + }) + if err != nil { + return nil, errors.New("cannot extract archive file " + jsonFilename) + } + return jsonrpcCommands, nil +} + +func readJsonCommands(jsonFilename string, metrics *TestMetrics) ([]JsonRpcCommand, error) { file, err := os.Open(jsonFilename) if err != nil { return nil, fmt.Errorf("cannot open file %s: %w", jsonFilename, err) @@ -1740,17 +1635,13 @@ func runTest(ctx context.Context, descriptor *TestDescriptor, config *Config) Te var jsonrpcCommands []JsonRpcCommand var err error if isArchive(jsonFilename) { - jsonrpcCommands, err = extractArchive(jsonFilename, config.SanitizeArchiveExt, &outcome.Metrics) - if err != nil { - outcome.Error = errors.New("cannot extract archive file " + jsonFilename) - return outcome - } + jsonrpcCommands, err = extractJsonCommands(jsonFilename, config.SanitizeArchiveExt, &outcome.Metrics) } else { - jsonrpcCommands, err = extractJsonCommands(jsonFilename, 
&outcome.Metrics) - if err != nil { - outcome.Error = err - return outcome - } + jsonrpcCommands, err = readJsonCommands(jsonFilename, &outcome.Metrics) + } + if err != nil { + outcome.Error = err + return outcome } if len(jsonrpcCommands) != 1 { From bb7e82bb030e5cede854cfa10ac7f99d98593c4e Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Tue, 13 Jan 2026 08:47:54 +0100 Subject: [PATCH 58/87] integration: refactor archive module --- cmd/integration/archive/archive.go | 4 +- cmd/integration/archive/archive_test.go | 70 ++++++++++++------------- cmd/integration/main.go | 2 +- 3 files changed, 38 insertions(+), 38 deletions(-) diff --git a/cmd/integration/archive/archive.go b/cmd/integration/archive/archive.go index 5d87289f..4f732c46 100644 --- a/cmd/integration/archive/archive.go +++ b/cmd/integration/archive/archive.go @@ -68,8 +68,8 @@ func autodetectCompression(inFile *os.File) (Compression, error) { return compressionType, nil } -// ExtractAndApply extracts a compressed or uncompressed tar archive and applies the given function to it. -func ExtractAndApply(archivePath string, sanitizeExtension bool, f func(*tar.Reader) error) error { +// Extract extracts a compressed or uncompressed tar archive and applies the given function to it. 
+func Extract(archivePath string, sanitizeExtension bool, f func(*tar.Reader) error) error { inputFile, err := os.Open(archivePath) if err != nil { return fmt.Errorf("failed to open archive: %w", err) diff --git a/cmd/integration/archive/archive_test.go b/cmd/integration/archive/archive_test.go index 6cb026e7..5223c5f0 100644 --- a/cmd/integration/archive/archive_test.go +++ b/cmd/integration/archive/archive_test.go @@ -241,24 +241,24 @@ func TestAutodetectCompression_InvalidFile(t *testing.T) { var nullTarFunc = func(*tar.Reader) error { return nil } -func TestExtractAndApply_NonExistentFile(t *testing.T) { - err := ExtractAndApply("/nonexistent/path/file.tar", false, nullTarFunc) +func TestExtract_NonExistentFile(t *testing.T) { + err := Extract("/nonexistent/path/file.tar", false, nullTarFunc) if err == nil { t.Error("expected error for non-existent file") } } -func TestExtractAndApply_UncompressedTar(t *testing.T) { +func TestExtract_UncompressedTar(t *testing.T) { tmpFilePath := createTempTarWithJSON(t, NoCompression) defer removeTempFile(t, tmpFilePath) - err := ExtractAndApply(tmpFilePath, false, nullTarFunc) + err := Extract(tmpFilePath, false, nullTarFunc) if err != nil { t.Fatalf("unexpected error: %v", err) } } -func TestExtractAndApply_GzipTar(t *testing.T) { +func TestExtract_GzipTar(t *testing.T) { tmpFile := createTempTarWithJSON(t, GzipCompression) defer removeTempFile(t, tmpFile) @@ -269,49 +269,49 @@ func TestExtractAndApply_GzipTar(t *testing.T) { } defer removeTempFile(t, newPath) - err := ExtractAndApply(newPath, false, nullTarFunc) + err := Extract(newPath, false, nullTarFunc) if err != nil { t.Fatalf("unexpected error: %v", err) } } -func TestExtractAndApply_NilMetrics(t *testing.T) { +func TestExtract_NilMetrics(t *testing.T) { tmpFile := createTempTarWithJSON(t, NoCompression) defer removeTempFile(t, tmpFile) - err := ExtractAndApply(tmpFile, false, nullTarFunc) + err := Extract(tmpFile, false, nullTarFunc) if err != nil { 
t.Fatalf("unexpected error: %v", err) } } -func TestExtractAndApply_EmptyArchive(t *testing.T) { +func TestExtract_EmptyArchive(t *testing.T) { tmpFile := createEmptyTarFile(t) defer removeTempFile(t, tmpFile) // Empty archive should return error since Next() is called internally - err := ExtractAndApply(tmpFile, false, nullTarFunc) + err := Extract(tmpFile, false, nullTarFunc) if err == nil { t.Error("expected error for empty archive") } } -func TestExtractAndApply_InvalidJSON(t *testing.T) { +func TestExtract_InvalidJSON(t *testing.T) { tmpFile := createTempTarFile(t, "invalid json content", NoCompression) defer removeTempFile(t, tmpFile) - err := ExtractAndApply(tmpFile, false, nullTarFunc) + err := Extract(tmpFile, false, nullTarFunc) if err != nil { - t.Fatalf("unexpected error from ExtractAndApply: %v", err) + t.Fatalf("unexpected error from Extract: %v", err) } } -func TestExtractAndApply_SanitizeExtension(t *testing.T) { +func TestExtract_SanitizeExtension(t *testing.T) { tmpFile := createTempTarWithJSON(t, GzipCompression) defer removeTempFile(t, tmpFile) defer removeTempFile(t, tmpFile+".gz") - err := ExtractAndApply(tmpFile, true, nullTarFunc) + err := Extract(tmpFile, true, nullTarFunc) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -325,17 +325,17 @@ func TestExtractAndApply_SanitizeExtension(t *testing.T) { } } -func TestExtractAndApply_DirectoryInArchive(t *testing.T) { +func TestExtract_DirectoryInArchive(t *testing.T) { tmpFile := createTempTarWithDirectory(t) defer removeTempFile(t, tmpFile) - err := ExtractAndApply(tmpFile, false, nullTarFunc) + err := Extract(tmpFile, false, nullTarFunc) if err == nil { t.Error("expected error for directory in archive as unsupported") } } -func TestExtractAndApply_TgzExtension(t *testing.T) { +func TestExtract_TgzExtension(t *testing.T) { tmpFile := createTempTarWithJSON(t, GzipCompression) // Rename to .tgz @@ -345,30 +345,30 @@ func TestExtractAndApply_TgzExtension(t *testing.T) { } defer 
removeTempFile(t, tgzPath) - err := ExtractAndApply(tgzPath, false, nullTarFunc) + err := Extract(tgzPath, false, nullTarFunc) if err != nil { t.Fatalf("unexpected error: %v", err) } } -func TestExtractAndApply_AutodetectGzip(t *testing.T) { +func TestExtract_AutodetectGzip(t *testing.T) { // Create gzip tar but with .tar extension (no compression hint) tmpFile := createTempTarWithJSON(t, GzipCompression) defer removeTempFile(t, tmpFile) defer removeTempFile(t, tmpFile+".gz") - err := ExtractAndApply(tmpFile, false, nullTarFunc) + err := Extract(tmpFile, false, nullTarFunc) if err != nil { t.Fatalf("unexpected error: %v", err) } } -func TestExtractAndApply_Bzip2Tar(t *testing.T) { +func TestExtract_Bzip2Tar(t *testing.T) { tmpFile := createTempTarWithJSON(t, Bzip2Compression) defer removeTempFile(t, tmpFile) var callbackInvoked bool - err := ExtractAndApply(tmpFile, false, func(tr *tar.Reader) error { + err := Extract(tmpFile, false, func(tr *tar.Reader) error { callbackInvoked = true // Verify we can read from the tar - Next() already called, second should be EOF _, err := tr.Next() @@ -409,7 +409,7 @@ func TestGetCompressionType_EdgeCases(t *testing.T) { } } -func TestExtractAndApply_CorruptedGzip(t *testing.T) { +func TestExtract_CorruptedGzip(t *testing.T) { tmpDir := t.TempDir() tmpFile := filepath.Join(tmpDir, "corrupted.tar.gz") @@ -428,7 +428,7 @@ func TestExtractAndApply_CorruptedGzip(t *testing.T) { t.Fatalf("failed to close file %s: %v", tmpFile, err) } - err = ExtractAndApply(tmpFile, false, nullTarFunc) + err = Extract(tmpFile, false, nullTarFunc) if err == nil { t.Error("expected error for corrupted gzip") } @@ -451,7 +451,7 @@ func BenchmarkGetCompressionType(b *testing.B) { } } -func BenchmarkExtractArchive(b *testing.B) { +func BenchmarkExtract(b *testing.B) { tmpDir := b.TempDir() tmpFile := filepath.Join(tmpDir, "bench.tar") @@ -484,14 +484,14 @@ func BenchmarkExtractArchive(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - err := 
ExtractAndApply(tmpFile, false, nullTarFunc) + err := Extract(tmpFile, false, nullTarFunc) if err != nil { b.Fatalf("unexpected error: %v", err) } } } -func TestExtractAndApply_LargeJSON(t *testing.T) { +func TestExtract_LargeJSON(t *testing.T) { // Create a large JSON payload var buf bytes.Buffer buf.WriteString("[") @@ -506,18 +506,18 @@ func TestExtractAndApply_LargeJSON(t *testing.T) { tmpFile := createTempTarFile(t, buf.String(), NoCompression) defer removeTempFile(t, tmpFile) - err := ExtractAndApply(tmpFile, false, nullTarFunc) + err := Extract(tmpFile, false, nullTarFunc) if err != nil { t.Fatalf("unexpected error: %v", err) } } -func TestExtractAndApply_CallbackError(t *testing.T) { +func TestExtract_CallbackError(t *testing.T) { tmpFile := createTempTarWithJSON(t, NoCompression) defer removeTempFile(t, tmpFile) expectedErr := io.ErrUnexpectedEOF - err := ExtractAndApply(tmpFile, false, func(tr *tar.Reader) error { + err := Extract(tmpFile, false, func(tr *tar.Reader) error { return expectedErr }) if !errors.Is(err, expectedErr) { @@ -525,13 +525,13 @@ func TestExtractAndApply_CallbackError(t *testing.T) { } } -func TestExtractAndApply_CallbackReadsContent(t *testing.T) { +func TestExtract_CallbackReadsContent(t *testing.T) { expectedContent := `{"test":"value"}` tmpFile := createTempTarFile(t, expectedContent, NoCompression) defer removeTempFile(t, tmpFile) var readContent []byte - err := ExtractAndApply(tmpFile, false, func(tr *tar.Reader) error { + err := Extract(tmpFile, false, func(tr *tar.Reader) error { var err error readContent, err = io.ReadAll(tr) return err @@ -544,9 +544,9 @@ func TestExtractAndApply_CallbackReadsContent(t *testing.T) { } } -func TestExtractAndApply_NonExistentFileCallbackNotCalled(t *testing.T) { +func TestExtract_NonExistentFileCallbackNotCalled(t *testing.T) { callbackCalled := false - err := ExtractAndApply("/nonexistent/path/file.tar", false, func(tr *tar.Reader) error { + err := Extract("/nonexistent/path/file.tar", 
false, func(tr *tar.Reader) error { callbackCalled = true return nil }) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 1660ef6c..e756a5d2 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1220,7 +1220,7 @@ func isArchive(jsonFilename string) bool { func extractJsonCommands(jsonFilename string, sanitizeExtension bool, metrics *TestMetrics) ([]JsonRpcCommand, error) { var jsonrpcCommands []JsonRpcCommand - err := archive.ExtractAndApply(jsonFilename, sanitizeExtension, func(reader *tar.Reader) error { + err := archive.Extract(jsonFilename, sanitizeExtension, func(reader *tar.Reader) error { bufferedReader := bufio.NewReaderSize(reader, 8*os.Getpagesize()) start := time.Now() if err := jsoniter.NewDecoder(bufferedReader).Decode(&jsonrpcCommands); err != nil { From c9cc354fa30318e045b2b099fd5316bd38f52c91 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 13 Feb 2026 17:40:30 +0100 Subject: [PATCH 59/87] complete refactoring + unit tests + benchmarks add AI context and skill --- .claude/skills/erigon-rpcdaemon-run/SKILL.md | 76 + CLAUDE.md | 85 + cmd/archive/main.go | 337 +++ cmd/integration/main.go | 2030 +----------------- cmd/perf/main.go | 1830 +--------------- go.mod | 2 +- internal/compare/comparator.go | 439 ++++ internal/compare/comparator_bench_test.go | 93 + internal/compare/comparator_test.go | 262 +++ internal/config/config.go | 321 +++ internal/config/config_test.go | 329 +++ internal/filter/filter.go | 204 ++ internal/filter/filter_bench_test.go | 59 + internal/filter/filter_test.go | 262 +++ internal/filter/lists.go | 136 ++ internal/perf/config.go | 136 ++ internal/perf/hardware.go | 201 ++ internal/perf/perf_bench_test.go | 53 + internal/perf/perf_test.go | 262 +++ internal/perf/report.go | 442 ++++ internal/perf/sequence.go | 99 + internal/perf/vegeta.go | 588 +++++ internal/rpc/client.go | 43 + internal/rpc/client_bench_test.go | 39 + internal/rpc/client_test.go | 
255 +++ internal/rpc/http.go | 273 +++ internal/rpc/websocket.go | 68 + internal/runner/executor.go | 179 ++ internal/runner/runner.go | 285 +++ internal/runner/runner_bench_test.go | 58 + internal/runner/runner_test.go | 149 ++ internal/runner/stats.go | 59 + internal/testdata/discovery.go | 99 + internal/testdata/discovery_bench_test.go | 21 + internal/testdata/discovery_test.go | 234 ++ internal/testdata/loader.go | 72 + internal/testdata/types.go | 79 + 37 files changed, 6472 insertions(+), 3687 deletions(-) create mode 100644 .claude/skills/erigon-rpcdaemon-run/SKILL.md create mode 100644 CLAUDE.md create mode 100644 cmd/archive/main.go create mode 100644 internal/compare/comparator.go create mode 100644 internal/compare/comparator_bench_test.go create mode 100644 internal/compare/comparator_test.go create mode 100644 internal/config/config.go create mode 100644 internal/config/config_test.go create mode 100644 internal/filter/filter.go create mode 100644 internal/filter/filter_bench_test.go create mode 100644 internal/filter/filter_test.go create mode 100644 internal/filter/lists.go create mode 100644 internal/perf/config.go create mode 100644 internal/perf/hardware.go create mode 100644 internal/perf/perf_bench_test.go create mode 100644 internal/perf/perf_test.go create mode 100644 internal/perf/report.go create mode 100644 internal/perf/sequence.go create mode 100644 internal/perf/vegeta.go create mode 100644 internal/rpc/client.go create mode 100644 internal/rpc/client_bench_test.go create mode 100644 internal/rpc/client_test.go create mode 100644 internal/rpc/http.go create mode 100644 internal/rpc/websocket.go create mode 100644 internal/runner/executor.go create mode 100644 internal/runner/runner.go create mode 100644 internal/runner/runner_bench_test.go create mode 100644 internal/runner/runner_test.go create mode 100644 internal/runner/stats.go create mode 100644 internal/testdata/discovery.go create mode 100644 
internal/testdata/discovery_bench_test.go create mode 100644 internal/testdata/discovery_test.go create mode 100644 internal/testdata/loader.go create mode 100644 internal/testdata/types.go diff --git a/.claude/skills/erigon-rpcdaemon-run/SKILL.md b/.claude/skills/erigon-rpcdaemon-run/SKILL.md new file mode 100644 index 00000000..71d879b8 --- /dev/null +++ b/.claude/skills/erigon-rpcdaemon-run/SKILL.md @@ -0,0 +1,76 @@ +--- +name: erigon-rpcdaemon-run +description: Use to run standalone Erigon RpcDaemon on an existing datadir. Use when the user wants to exercise the `rpc-tests` binaries (`rpc_int`, `rpc_perf`) against real server. +allowed-tools: Bash, Read, Glob +--- + +# Erigon RpcDaemon Run + +## Overview +The `rpcdaemon` command runs standalone RpcDaemon on an existing Erigon datadir. + +## Command Syntax + +```bash +cd && ./build/bin/rpcdaemon --datadir= --http.api admin,debug,eth,parity,erigon,trace,web3,txpool,ots,net --ws [other-flags] +``` + +## Required Flags + +- `--datadir`: Path to the Erigon datadir (required) + +## Usage Patterns + +### Change HTTP port +```bash +cd && ./build/bin/rpcdaemon --datadir= --http.port=8546 +``` + +### WebSocket support +```bash +cd && ./build/bin/rpcdaemon --datadir= --ws +``` + +## Important Considerations + +### Before Running +1. **Ask for Erigon home**: Ask the user which Erigon home folder to use if not already provided +2. **Stop Erigon and RpcDaemon**: Ensure Erigon and/or RpcDaemon are not running on the target datadir +3. **Ensure RpcDaemon binary is built**: run `make rpcdaemon` to build it + +### After Running +1. Wait until the HTTP port (value provided with --http.port or default 8545) is reachable + + +## Workflow + +When the user wants to run Erigon RpcDaemon: + +1. **Confirm parameters** + - Ask for Erigon home path to use if not provided or know in context + - Ask for target datadir path + +2. 
**Safety checks** + - Verify Erigon home exists + - Verify datadir exists + - Check if Erigon and/or RpcDaemon are running (should not be) + + +## Error Handling + +Common issues: +- **"datadir not found"**: Verify the path is correct +- **"database locked"**: Stop Erigon process first + + +## Examples + +### Example 1: All API namespaces and WebSocket enabled +```bash +cd ../erigon_devel && ./build/bin/rpcdaemon --datadir=~/Library/erigon-eth-mainnet --http.api admin,debug,eth,parity,erigon,trace,web3,txpool,ots,net --ws +``` + + +## Tips + +- If building from source, use `make rpcdaemon` within to build the binary at `build/bin/rpcdaemon` diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000..dacfb554 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,85 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Overview + +`rpc-tests` is a collection of JSON-RPC black-box testing tools for Ethereum node implementations. It sends JSON-RPC requests to a running RPC daemon and compares responses against expected results stored as JSON test fixtures. The codebase has both Go (primary, being actively developed) and Python (legacy) implementations. 
+ +## Build & Run + +```bash +# Build the integration test binary +go build -o ./build/bin/rpc_int ./cmd/integration/main.go + +# Run Go unit tests +go test ./cmd/integration/archive/ +go test ./cmd/integration/jsondiff/ + +# Run Python unit tests +pytest + +# Run integration tests (requires a running RPC daemon on localhost:8545) +./build/bin/rpc_int -c -f # All tests, continue on fail, show only failures +./build/bin/rpc_int -t 246 # Single test by global number +./build/bin/rpc_int -A eth_getLogs -t 3 # Single test by API + test number +./build/bin/rpc_int -A eth_call # All tests for one API +./build/bin/rpc_int -a eth_ -c -f -S # APIs matching pattern, serial mode +./build/bin/rpc_int -b sepolia -c -f # Different network +``` + +## Architecture + +**Three independent tools** under `cmd/`: +- `cmd/integration/` — RPC integration test runner (primary tool, ~2100 lines in main.go) +- `cmd/compat/` — RPC compatibility checker +- `cmd/perf/` — Load/performance testing (uses Vegeta) + +**Integration test runner flow:** +1. Scans `integration/{network}/` for test fixture files (JSON or tar archives) +2. Tests are globally numbered across all APIs and filtered by CLI flags +3. Executes in parallel (worker pool, `runtime.NumCPU()` workers) by default +4. Sends JSON-RPC request from each test fixture to the daemon +5. Compares actual response against expected response using JSON diff +6. 
Reports results with colored output, saves diffs to `{network}/results/` + +**Supporting packages:** +- `cmd/integration/archive/` — Extract test fixtures from tar/gzip/bzip2 archives +- `cmd/integration/jsondiff/` — Pure Go JSON diff with colored output +- `cmd/integration/rpc/` — HTTP JSON-RPC client with JWT auth and compression support + +**Active v2 refactor** (branch `canepat/v2`): `integration/cli/` is a restructured version of the test runner using `urfave/cli/v2`, splitting the monolithic main.go into focused modules: `flags.go` (config), `test_runner.go` (orchestration), `test_execution.go` (per-test logic), `test_comparator.go` (response comparison), `test_filter.go` (filtering), `rpc.go` (client), `utils.go`. + +**Test fixture format** — each test is a JSON file (or tarball containing JSON): +```json +{ + "request": [{"jsonrpc":"2.0","method":"eth_call","params":[...],"id":1}], + "response": [{"jsonrpc":"2.0","id":1,"result":"0x..."}] +} +``` + +Test data lives in `integration/{network}/{api_name}/test_NN.json` across networks: mainnet, sepolia, gnosis, arb-sepolia, polygon-pos. + +## Key CLI Flags + +| Flag | Description | +|------|-------------| +| `-c` | Continue on test failure (default: exit on first failure) | +| `-f` | Display only failed tests | +| `-S` | Serial execution (default: parallel) | +| `-v 0/1/2` | Verbosity level | +| `-b ` | Blockchain: mainnet, sepolia, gnosis (default: mainnet) | +| `-H ` / `-p ` | RPC daemon address (default: localhost:8545) | +| `-A ` | Filter by exact API name (comma-separated) | +| `-a ` | Filter by API name pattern | +| `-t ` | Run single test by number | +| `-x ` | Exclude APIs | +| `-X ` | Exclude test numbers | +| `-T ` | Transport: http, http_comp, https, websocket, websocket_comp | +| `-k ` | JWT secret file for engine API auth | + +## Dependencies + +Go 1.24. 
Key libraries: `gorilla/websocket` (WebSocket transport), `josephburnett/jd/v2` (JSON diffing), `tsenart/vegeta/v12` (load testing), `urfave/cli/v2` (CLI framework for v2), `golang-jwt/jwt/v5` (JWT auth), `dsnet/compress` (bzip2). + +Python 3.10+ with `requirements.txt` for legacy runner and standalone tools in `src/rpctests/`. \ No newline at end of file diff --git a/cmd/archive/main.go b/cmd/archive/main.go new file mode 100644 index 00000000..89706fa1 --- /dev/null +++ b/cmd/archive/main.go @@ -0,0 +1,337 @@ +package main + +import ( + "archive/tar" + "compress/bzip2" + "compress/gzip" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + bzip2w "github.com/dsnet/compress/bzip2" +) + +// Supported compression types +const ( + GzipCompression = ".gz" + Bzip2Compression = ".bz2" + NoCompression = "" +) + +// --- Helper Functions --- + +// getCompressionType determines the compression from the filename extension. +func getCompressionType(filename string) string { + if strings.HasSuffix(filename, ".tar.gz") || strings.HasSuffix(filename, ".tgz") { + return GzipCompression + } + if strings.HasSuffix(filename, ".tar.bz2") || strings.HasSuffix(filename, ".tbz") { + return Bzip2Compression + } + return NoCompression +} + +// --- Archiving Logic --- + +// createArchive creates a compressed or uncompressed tar archive. +func createArchive(archivePath string, files []string) error { + fmt.Printf("📦 Creating archive: %s\n", archivePath) + + // 1. Create the output file + outFile, err := os.Create(archivePath) + if err != nil { + return fmt.Errorf("failed to create output file: %w", err) + } + defer outFile.Close() + + // 2. 
Wrap the output file with the correct compression writer + var writer io.WriteCloser = outFile + compressionType := getCompressionType(archivePath) + + switch compressionType { + case GzipCompression: + writer = gzip.NewWriter(outFile) + case Bzip2Compression: + config := &bzip2w.WriterConfig{Level: bzip2w.BestCompression} + writer, err = bzip2w.NewWriter(outFile, config) + if err != nil { + return fmt.Errorf("failed to create bzip2 writer: %w", err) + } + } + // For robustness in a real-world scenario, you'd check and defer Close() on the compression writer. + // For this demonstration, we'll focus on the tar writer cleanup. + + // 3. Create the Tar writer + tarWriter := tar.NewWriter(writer) + defer tarWriter.Close() + + // 4. Add files to the archive + for _, file := range files { + err := addFileToTar(tarWriter, file, "") + if err != nil { + return fmt.Errorf("failed to add file %s: %w", file, err) + } + } + + // 5. Explicitly close the compression writer if it was used (before closing the tar writer) + if compressionType != NoCompression { + if err := writer.Close(); err != nil { + return fmt.Errorf("failed to close compression writer: %w", err) + } + } + + return nil +} + +// addFileToTar recursively adds a file or directory to the tar archive. 
+func addFileToTar(tarWriter *tar.Writer, filePath, baseDir string) error { + fileInfo, err := os.Stat(filePath) + if err != nil { + return err + } + + // Determine the name inside the archive (relative path) + var link string + if fileInfo.Mode()&os.ModeSymlink != 0 { + link, err = os.Readlink(filePath) + if err != nil { + return err + } + } + + // If baseDir is not empty, use the relative path, otherwise use the basename + nameInArchive := filePath + if baseDir != "" && strings.HasPrefix(filePath, baseDir) { + nameInArchive = filePath[len(baseDir)+1:] + } else { + nameInArchive = filepath.Base(filePath) + } + + // Create the Tar Header + header, err := tar.FileInfoHeader(fileInfo, link) + if err != nil { + return err + } + header.Name = nameInArchive + + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + + // Write file contents if it's a regular file + if fileInfo.Mode().IsRegular() { + file, err := os.Open(filePath) + if err != nil { + return err + } + defer file.Close() + + if _, err := io.Copy(tarWriter, file); err != nil { + return err + } + fmt.Printf(" -> Added: %s\n", filePath) + } + + // Recurse into directories + if fileInfo.IsDir() { + dirEntries, err := os.ReadDir(filePath) + if err != nil { + return err + } + for _, entry := range dirEntries { + fullPath := filepath.Join(filePath, entry.Name()) + // Keep the original baseDir if it was set, otherwise set it to the current path's parent + newBaseDir := baseDir + if baseDir == "" { + // Special handling for the root call: use the current path as the new base. + // This ensures nested files have relative paths within the archive. 
+ newBaseDir = filePath + } + if err := addFileToTar(tarWriter, fullPath, newBaseDir); err != nil { + return err + } + } + } + + return nil +} + +// --- Unarchiving Logic --- + +func autodetectCompression(archivePath string, inFile *os.File) (string, error) { + compressionType := NoCompression + tarReader := tar.NewReader(inFile) + _, err := tarReader.Next() + if err != nil { + inFile.Close() + inFile, err = os.Open(archivePath) + _, err = gzip.NewReader(inFile) + if err == nil { // gzip is OK, rename + compressionType = GzipCompression + err := inFile.Close() + if err != nil { + return compressionType, err + } + } else { + inFile.Close() + inFile, err = os.Open(archivePath) + _, err = tar.NewReader(bzip2.NewReader(inFile)).Next() + inFile.Close() + if err == nil { // bzip2 is OK, rename + compressionType = Bzip2Compression + } + } + } + return compressionType, nil +} + +// extractArchive extracts a compressed or uncompressed tar archive. +func extractArchive(archivePath string, renameIfCompressed bool) error { + fmt.Printf("📂 Extracting archive: %s\n", archivePath) + + // 1. Open the archive file + inFile, err := os.Open(archivePath) + if err != nil { + return fmt.Errorf("failed to open archive: %w", err) + } + defer inFile.Close() + + // 2. 
Wrap the input file with the correct compression reader + compressionType := getCompressionType(archivePath) + if compressionType == NoCompression { + // Handle the corner case where the file is compressed but has tar extension + compressionType, err = autodetectCompression(archivePath, inFile) + if err != nil { + return fmt.Errorf("failed to autodetect compression for archive: %w", err) + } + if compressionType != NoCompression && renameIfCompressed { + err = os.Rename(archivePath, archivePath+compressionType) + if err != nil { + return err + } + archivePath = archivePath + compressionType + } + inFile, err = os.Open(archivePath) + if err != nil { + return err + } + } + + var reader io.Reader + switch compressionType { + case GzipCompression: + if reader, err = gzip.NewReader(inFile); err != nil { + return fmt.Errorf("failed to create gzip reader: %w", err) + } + // gzip.NewReader has an implicit Close() that cleans up the internal state, + // but since we wrap it in a tar reader, we rely on the tar reader for overall flow. + // In a production scenario, you would defer the close of the gzip reader. + case Bzip2Compression: + reader = bzip2.NewReader(inFile) + case NoCompression: + reader = inFile + } + + // 3. Create the Tar reader + tarReader := tar.NewReader(reader) + + // 4. 
Iterate over files in the archive and extract them + for { + header, err := tarReader.Next() + + if err == io.EOF { + break // End of archive + } + if err != nil { + return fmt.Errorf("failed to read tar header: %w", err) + } + + targetPath := filepath.Dir(archivePath) + "/" + header.Name + + switch header.Typeflag { + case tar.TypeDir: + // Create directory + if err := os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil { + return fmt.Errorf("failed to create directory %s: %w", targetPath, err) + } + fmt.Printf(" -> Created directory: %s\n", targetPath) + + case tar.TypeReg: + // Ensure the parent directory exists before creating the file + if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil { + return fmt.Errorf("failed to create parent directory for %s: %w", targetPath, err) + } + + // Create the file + outFile, err := os.OpenFile(targetPath, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) + if err != nil { + return fmt.Errorf("failed to create file %s: %w", targetPath, err) + } + + // Write content + if _, err := io.Copy(outFile, tarReader); err != nil { + outFile.Close() + return fmt.Errorf("failed to write file content for %s: %w", targetPath, err) + } + outFile.Close() + fmt.Printf(" -> Extracted file: %s\n", targetPath) + + default: + fmt.Printf(" -> Skipping unsupported file type %c: %s\n", header.Typeflag, targetPath) + } + } + + return nil +} + +// --- Main Function and CLI --- + +func main() { + // Define command-line flags + extractFlag := flag.Bool("x", false, "Extract (unarchive) files from the archive.") + renameFlag := flag.Bool("r", false, "Rename the archive when extracting if it's compressed.") + + // The archive name is always the first non-flag argument + flag.Usage = func() { + _, _ = fmt.Fprintf(os.Stderr, "Usage:\n") + _, _ = fmt.Fprintf(os.Stderr, " Archive: %s [file_or_dir_2]...\n", os.Args[0]) + _, _ = fmt.Fprintf(os.Stderr, " Unarchive: %s -x \n\n", os.Args[0]) + _, _ = fmt.Fprintf(os.Stderr, "Supported 
extensions: .tar, .tar.gz/.tgz, .tar.bz2/.tbz\n\n") + _, _ = fmt.Fprintf(os.Stderr, "Options:\n") + flag.PrintDefaults() + } + + flag.Parse() + args := flag.Args() + if len(args) < 1 { + flag.Usage() + os.Exit(1) + } + + archivePath := args[0] + + if *extractFlag { + // UNARCHIVE MODE (-x) + if err := extractArchive(archivePath, *renameFlag); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "❌ Error during extraction: %v\n", err) + os.Exit(1) + } + fmt.Println("✅ Extraction complete.") + } else { + // ARCHIVE MODE (default) + if len(args) < 2 { + _, _ = fmt.Fprintf(os.Stderr, "Error: Must specify files/directories to archive.\n\n") + flag.Usage() + os.Exit(1) + } + filesToArchive := args[1:] + if err := createArchive(archivePath, filesToArchive); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "❌ Error during archiving: %v\n", err) + os.Exit(1) + } + fmt.Println("✅ Archiving complete.") + } +} diff --git a/cmd/integration/main.go b/cmd/integration/main.go index e756a5d2..6c603fb1 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -1,329 +1,21 @@ package main import ( - "archive/tar" - "bufio" - "bytes" "context" - "crypto/rand" - "encoding/hex" - "errors" "flag" "fmt" - "io" - "net/http" "os" - "os/exec" "os/signal" - "path/filepath" - "reflect" - "regexp" "runtime" "runtime/pprof" "runtime/trace" - "sort" - "strconv" - "strings" - "sync" "syscall" - "time" - "github.com/golang-jwt/jwt/v5" - "github.com/gorilla/websocket" - "github.com/josephburnett/jd/v2" - jsoniter "github.com/json-iterator/go" - - "github.com/erigontech/rpc-tests/cmd/integration/archive" - "github.com/erigontech/rpc-tests/cmd/integration/jsondiff" -) - -const ( - DaemonOnOtherPort = "other-daemon" - DaemonOnDefaultPort = "rpcdaemon" - None = "none" - ExternalProvider = "external-provider" - TimeInterval = 100 * time.Millisecond - MaxTime = 200 - TempDirname = "./temp_rpc_tests" -) - -var ( - apiNotCompared = []string{ - "mainnet/engine_getClientVersionV1", - 
"mainnet/trace_rawTransaction", - "mainnet/engine_", - } - - // testsOnLatest - add your list here - testsOnLatest = []string{ - "mainnet/debug_traceBlockByNumber/test_24.json", - "mainnet/debug_traceBlockByNumber/test_30.json", - "mainnet/debug_traceCall/test_22.json", - "mainnet/debug_traceCall/test_33.json", - "mainnet/debug_traceCall/test_34.json", - "mainnet/debug_traceCall/test_35.json", - "mainnet/debug_traceCall/test_36.json", - "mainnet/debug_traceCall/test_37.json", - "mainnet/debug_traceCall/test_38.json", - "mainnet/debug_traceCall/test_39.json", - "mainnet/debug_traceCall/test_40.json", - "mainnet/debug_traceCall/test_41.json", - "mainnet/debug_traceCall/test_42.json", - "mainnet/debug_traceCall/test_43.json", - "mainnet/debug_traceCallMany/test_11.json", - "mainnet/debug_traceCallMany/test_12.json", - "mainnet/eth_blobBaseFee", // works always on the latest block - "mainnet/eth_blockNumber", // works always on the latest block - "mainnet/eth_call/test_20.json", - "mainnet/eth_call/test_28.json", - "mainnet/eth_call/test_29.json", - "mainnet/eth_call/test_36.json", - "mainnet/eth_call/test_37.json", - "mainnet/eth_callBundle/test_09.json", - "mainnet/eth_createAccessList/test_18.json", - "mainnet/eth_createAccessList/test_19.json", - "mainnet/eth_createAccessList/test_20.json", - "mainnet/eth_createAccessList/test_22.json", - "mainnet/eth_estimateGas/test_01", - "mainnet/eth_estimateGas/test_02", - "mainnet/eth_estimateGas/test_03", - "mainnet/eth_estimateGas/test_04", - "mainnet/eth_estimateGas/test_05", - "mainnet/eth_estimateGas/test_06", - "mainnet/eth_estimateGas/test_07", - "mainnet/eth_estimateGas/test_08", - "mainnet/eth_estimateGas/test_09", - "mainnet/eth_estimateGas/test_10", - "mainnet/eth_estimateGas/test_11", - "mainnet/eth_estimateGas/test_12", - "mainnet/eth_estimateGas/test_21", - "mainnet/eth_estimateGas/test_22", - "mainnet/eth_estimateGas/test_23", - "mainnet/eth_estimateGas/test_27", - "mainnet/eth_feeHistory/test_07.json", - 
"mainnet/eth_feeHistory/test_22.json", - "mainnet/eth_gasPrice", // works always on the latest block - "mainnet/eth_getBalance/test_03.json", - "mainnet/eth_getBalance/test_26.json", - "mainnet/eth_getBalance/test_27.json", - "mainnet/eth_getBlockTransactionCountByNumber/test_03.json", - "mainnet/eth_getBlockByNumber/test_10.json", - "mainnet/eth_getBlockByNumber/test_27.json", - "mainnet/eth_getBlockReceipts/test_07.json", - "mainnet/eth_getCode/test_05.json", - "mainnet/eth_getCode/test_06.json", - "mainnet/eth_getCode/test_07.json", - "mainnet/eth_getLogs/test_21.json", - "mainnet/eth_getProof/test_01.json", - "mainnet/eth_getProof/test_02.json", - "mainnet/eth_getProof/test_03.json", - "mainnet/eth_getProof/test_04.json", - "mainnet/eth_getProof/test_05.json", - "mainnet/eth_getProof/test_06.json", - "mainnet/eth_getProof/test_07.json", - "mainnet/eth_getProof/test_08.json", - "mainnet/eth_getProof/test_09.json", - "mainnet/eth_getProof/test_10.json", - "mainnet/eth_getProof/test_11.json", - "mainnet/eth_getProof/test_12.json", - "mainnet/eth_getProof/test_13.json", - "mainnet/eth_getProof/test_14.json", - "mainnet/eth_getProof/test_15.json", - "mainnet/eth_getProof/test_16.json", - "mainnet/eth_getProof/test_17.json", - "mainnet/eth_getProof/test_18.json", - "mainnet/eth_getProof/test_19.json", - "mainnet/eth_getProof/test_20.json", - "mainnet/eth_getRawTransactionByBlockNumberAndIndex/test_11.json", - "mainnet/eth_getRawTransactionByBlockNumberAndIndex/test_12.json", - "mainnet/eth_getRawTransactionByBlockNumberAndIndex/test_13.json", - "mainnet/eth_getStorageAt/test_04.json", - "mainnet/eth_getStorageAt/test_07.json", - "mainnet/eth_getStorageAt/test_08.json", - "mainnet/eth_getTransactionByBlockNumberAndIndex/test_02.json", - "mainnet/eth_getTransactionByBlockNumberAndIndex/test_08.json", - "mainnet/eth_getTransactionByBlockNumberAndIndex/test_09.json", - "mainnet/eth_getTransactionCount/test_02.json", - "mainnet/eth_getTransactionCount/test_07.json", - 
"mainnet/eth_getTransactionCount/test_08.json", - "mainnet/eth_getUncleCountByBlockNumber/test_03.json", - "mainnet/eth_getUncleByBlockNumberAndIndex/test_02.json", - "mainnet/eth_maxPriorityFeePerGas", - "mainnet/eth_simulateV1/test_04.json", - "mainnet/eth_simulateV1/test_05.json", - "mainnet/eth_simulateV1/test_06.json", - "mainnet/eth_simulateV1/test_07.json", - "mainnet/eth_simulateV1/test_12.json", - "mainnet/eth_simulateV1/test_13.json", - "mainnet/eth_simulateV1/test_14.json", - "mainnet/eth_simulateV1/test_15.json", - "mainnet/eth_simulateV1/test_16.json", - "mainnet/eth_simulateV1/test_25.json", - "mainnet/eth_simulateV1/test_27.json", - "mainnet/erigon_blockNumber/test_4.json", - "mainnet/erigon_blockNumber/test_6.json", - "mainnet/ots_hasCode/test_10.json", - "mainnet/ots_searchTransactionsBefore/test_02.json", - "mainnet/parity_listStorageKeys", - "mainnet/trace_block/test_25.json", - "mainnet/trace_call/test_26.json", - "mainnet/trace_call/test_27.json", - "mainnet/trace_call/test_28.json", - "mainnet/trace_call/test_29.json", - "mainnet/trace_callMany/test_15.json", - "mainnet/trace_filter/test_25.json", - "mainnet/trace_replayBlockTransactions/test_36.json", - } + "github.com/erigontech/rpc-tests/internal/config" + "github.com/erigontech/rpc-tests/internal/runner" ) -type JsonDiffKind int - -const ( - JdLibrary JsonDiffKind = iota - JsonDiffTool - DiffTool - JsonDiffGo -) - -func (k JsonDiffKind) String() string { - return [...]string{"jd", "json-diff", "diff", "json-diff-go"}[k] -} - -// ParseJsonDiffKind converts a string into a JsonDiffKind enum type -func ParseJsonDiffKind(s string) (JsonDiffKind, error) { - switch strings.ToLower(s) { - case "jd": - return JdLibrary, nil - case "json-diff": - return JsonDiffTool, nil - case "diff": - return DiffTool, nil - case "json-diff-go": - return JsonDiffGo, nil - default: - return JdLibrary, fmt.Errorf("invalid JsonDiffKind value: %s", s) - } -} - -type Config struct { - ExitOnFail bool - DaemonUnderTest 
string - DaemonAsReference string - LoopNumber int - VerboseLevel int - ReqTestNumber int - ForceDumpJSONs bool - ExternalProviderURL string - DaemonOnHost string - ServerPort int - EnginePort int - TestingAPIsWith string - TestingAPIs string - VerifyWithDaemon bool - Net string - JSONDir string - ResultsDir string - OutputDir string - ExcludeAPIList string - ExcludeTestList string - StartTest string - JWTSecret string - DisplayOnlyFail bool - TransportType string - Parallel bool - DiffKind JsonDiffKind - WithoutCompareResults bool - WaitingTime int - DoNotCompareError bool - TestsOnLatestBlock bool - LocalServer string - SanitizeArchiveExt bool - CpuProfile string - MemProfile string - TraceFile string -} - -type TestMetrics struct { - RoundTripTime time.Duration - MarshallingTime time.Duration - UnmarshallingTime time.Duration - ComparisonCount int - EqualCount int -} - -type TestOutcome struct { - Success bool - Error error - ColoredDiff string - Metrics TestMetrics -} - -type TestResult struct { - Outcome TestOutcome - Test *TestDescriptor -} - -type TestDescriptor struct { - Name string - Number int - TransportType string - ResultChan chan TestResult -} - -type JsonRpcResponseMetadata struct { - PathOptions jsoniter.RawMessage `json:"pathOptions"` -} - -type JsonRpcTestMetadata struct { - Request interface{} `json:"request"` - Response *JsonRpcResponseMetadata `json:"response"` -} - -type JsonRpcTest struct { - Identifier string `json:"id"` - Reference string `json:"reference"` - Description string `json:"description"` - Metadata *JsonRpcTestMetadata `json:"metadata"` -} - -type JsonRpcCommand struct { - Request jsoniter.RawMessage `json:"request"` - Response any `json:"response"` - TestInfo *JsonRpcTest `json:"test"` -} - -func NewConfig() *Config { - return &Config{ - ExitOnFail: true, - DaemonUnderTest: DaemonOnDefaultPort, - DaemonAsReference: None, - LoopNumber: 1, - VerboseLevel: 0, - ReqTestNumber: -1, - ForceDumpJSONs: false, - ExternalProviderURL: "", 
- DaemonOnHost: "localhost", - ServerPort: 0, - EnginePort: 0, - TestingAPIsWith: "", - TestingAPIs: "", - VerifyWithDaemon: false, - Net: "mainnet", - ResultsDir: "results", - JWTSecret: "", - DisplayOnlyFail: false, - TransportType: "http", - Parallel: true, - DiffKind: JsonDiffGo, - WithoutCompareResults: false, - WaitingTime: 0, - DoNotCompareError: false, - TestsOnLatestBlock: false, - SanitizeArchiveExt: false, - } -} - -func (c *Config) parseFlags() error { +func parseFlags(cfg *config.Config) error { help := flag.Bool("h", false, "print help") flag.BoolVar(help, "help", false, "print help") @@ -393,8 +85,8 @@ func (c *Config) parseFlags() error { excludeTestList := flag.String("X", "", "exclude test list") flag.StringVar(excludeTestList, "exclude-test-list", "", "exclude test list") - diffKind := flag.String("j", c.DiffKind.String(), "diff for JSON values, one of: jd, json-diff, diff") - flag.StringVar(diffKind, "json-diff", c.DiffKind.String(), "diff for JSON values, one of: jd, json-diff, diff") + diffKind := flag.String("j", cfg.DiffKind.String(), "diff for JSON values") + flag.StringVar(diffKind, "json-diff", cfg.DiffKind.String(), "diff for JSON values") waitingTime := flag.Int("w", 0, "waiting time in milliseconds") flag.IntVar(waitingTime, "waiting-time", 0, "waiting time in milliseconds") @@ -419,126 +111,82 @@ func (c *Config) parseFlags() error { os.Exit(0) } - // Validation and conflicts - if *waitingTime > 0 && c.Parallel { - return fmt.Errorf("waiting-time is not compatible with parallel tests") - } - - if *daemonPort && *compareErigon { - return fmt.Errorf("daemon-port is not compatible with compare-erigon-rpcdaemon") - } - - if *testNumber != -1 && (*excludeTestList != "" || *excludeAPIList != "") { - return fmt.Errorf("run-test is not compatible with exclude-api-list or exclude-test-list") - } - - if *apiList != "" && *excludeAPIList != "" { - return fmt.Errorf("api-list is not compatible with exclude-api-list") - } - - if *compareErigon && 
*withoutCompare { - return fmt.Errorf("compare-erigon-rpcdaemon is not compatible with without-compare-results") - } - - // Apply configuration - c.ExitOnFail = !*continueOnFail - c.VerboseLevel = *verbose - c.ReqTestNumber = *testNumber - c.LoopNumber = *loops - c.DaemonOnHost = *host - c.ServerPort = *port - c.EnginePort = *enginePort - c.DisplayOnlyFail = *displayOnlyFail - c.TestingAPIsWith = *apiListWith - c.TestingAPIs = *apiList - c.Net = *blockchain - c.ExcludeAPIList = *excludeAPIList - c.ExcludeTestList = *excludeTestList - c.StartTest = *startTest - c.TransportType = *transportType - c.WaitingTime = *waitingTime - c.ForceDumpJSONs = *dumpResponse - c.WithoutCompareResults = *withoutCompare - c.DoNotCompareError = *doNotCompareError - c.TestsOnLatestBlock = *testOnLatest - c.Parallel = !*serial - c.CpuProfile = *cpuProfile - c.MemProfile = *memProfile - c.TraceFile = *traceFile - - kind, err := ParseJsonDiffKind(*diffKind) + cfg.ExitOnFail = !*continueOnFail + cfg.Parallel = !*serial + cfg.VerboseLevel = *verbose + cfg.ReqTestNum = *testNumber + cfg.LoopNumber = *loops + cfg.DaemonOnHost = *host + cfg.ServerPort = *port + cfg.EnginePort = *enginePort + cfg.DisplayOnlyFail = *displayOnlyFail + cfg.TestingAPIsWith = *apiListWith + cfg.TestingAPIs = *apiList + cfg.Net = *blockchain + cfg.ExcludeAPIList = *excludeAPIList + cfg.ExcludeTestList = *excludeTestList + cfg.StartTest = *startTest + cfg.TransportType = *transportType + cfg.WaitingTime = *waitingTime + cfg.ForceDumpJSONs = *dumpResponse + cfg.WithoutCompareResults = *withoutCompare + cfg.DoNotCompareError = *doNotCompareError + cfg.TestsOnLatestBlock = *testOnLatest + cfg.CpuProfile = *cpuProfile + cfg.MemProfile = *memProfile + cfg.TraceFile = *traceFile + + kind, err := config.ParseDiffKind(*diffKind) if err != nil { return err } - c.DiffKind = kind + cfg.DiffKind = kind if *daemonPort { - c.DaemonUnderTest = DaemonOnOtherPort + cfg.DaemonUnderTest = config.DaemonOnOtherPort } if *externalProvider 
!= "" { - c.DaemonAsReference = ExternalProvider - c.ExternalProviderURL = *externalProvider - c.VerifyWithDaemon = true + cfg.DaemonAsReference = config.ExternalProvider + cfg.ExternalProviderURL = *externalProvider + cfg.VerifyWithDaemon = true } if *compareErigon { - c.VerifyWithDaemon = true - c.DaemonAsReference = DaemonOnDefaultPort + cfg.VerifyWithDaemon = true + cfg.DaemonAsReference = config.DaemonOnDefaultPort } if *createJWT != "" { - if err := generateJWTSecret(*createJWT, 64); err != nil { + if err := config.GenerateJWTSecret(*createJWT, 64); err != nil { return fmt.Errorf("failed to create JWT secret: %v", err) } - secret, err := getJWTSecret(*createJWT) + secret, err := config.GetJWTSecret(*createJWT) if err != nil { return fmt.Errorf("failed to read JWT secret: %v", err) } - c.JWTSecret = secret + cfg.JWTSecret = secret } else if *jwtFile != "" { - secret, err := getJWTSecret(*jwtFile) + secret, err := config.GetJWTSecret(*jwtFile) if err != nil { return fmt.Errorf("secret file not found: %s", *jwtFile) } - c.JWTSecret = secret + cfg.JWTSecret = secret } - // Validate transport type - if *transportType != "" { - types := strings.Split(*transportType, ",") - for _, t := range types { - if t != "websocket" && t != "http" && t != "http_comp" && t != "https" && t != "websocket_comp" { - return fmt.Errorf("invalid connection type: %s", t) - } - } + if err := cfg.Validate(); err != nil { + return err } - c.UpdateDirs() + cfg.UpdateDirs() - // Remove output directory if exists - if _, err := os.Stat(c.OutputDir); err == nil { - err := os.RemoveAll(c.OutputDir) - if err != nil { - return err - } + if err := cfg.CleanOutputDir(); err != nil { + return err } return nil } -func (c *Config) UpdateDirs() { - c.JSONDir = "./integration/" + c.Net + "/" - c.OutputDir = c.JSONDir + c.ResultsDir + "/" - if c.ServerPort == 0 { - c.ServerPort = 8545 - } - if c.EnginePort == 0 { - c.EnginePort = 8551 - } - c.LocalServer = "http://" + c.DaemonOnHost + ":" + 
strconv.Itoa(c.ServerPort) -} - func usage() { fmt.Println("Usage: rpc_int [options]") fmt.Println("") @@ -574,1559 +222,93 @@ func usage() { fmt.Println(" -L, --tests-on-latest-block runs only test on latest block") } -func getTarget(targetType, method string, config *Config) string { - isEngine := strings.HasPrefix(method, "engine_") - - if targetType == ExternalProvider { - return config.ExternalProviderURL - } - - if config.VerifyWithDaemon && targetType == DaemonOnOtherPort && isEngine { - return config.DaemonOnHost + ":51516" - } - - if config.VerifyWithDaemon && targetType == DaemonOnOtherPort { - return config.DaemonOnHost + ":51515" - } - - if targetType == DaemonOnOtherPort && isEngine { - return config.DaemonOnHost + ":51516" - } - - if targetType == DaemonOnOtherPort { - return config.DaemonOnHost + ":51515" - } - - if isEngine { - port := config.EnginePort - if port == 0 { - port = 8551 - } - return config.DaemonOnHost + ":" + strconv.Itoa(port) - } - - port := config.ServerPort - if port == 0 { - port = 8545 - } - return config.DaemonOnHost + ":" + strconv.Itoa(port) -} - -func getJSONFilenameExt(targetType, target string) string { - parts := strings.Split(target, ":") - port := "" - if len(parts) > 1 { - port = parts[1] - } - - if targetType == DaemonOnOtherPort { - return "_" + port + "-daemon.json" - } - if targetType == ExternalProvider { - return "-external_provider_url.json" - } - return "_" + port + "-rpcdaemon.json" -} - -func getJWTSecret(filename string) (string, error) { - data, err := os.ReadFile(filename) - if err != nil { - return "", err - } - contents := string(data) - if len(contents) >= 2 && contents[:2] == "0x" { - return contents[2:], nil - } - return strings.TrimSpace(contents), nil -} - -func generateJWTSecret(filename string, length int) error { - if length <= 0 { - length = 64 - } - randomBytes := make([]byte, length/2) - if _, err := rand.Read(randomBytes); err != nil { - return err - } - randomHex := "0x" + 
hex.EncodeToString(randomBytes) - if err := os.WriteFile(filename, []byte(randomHex), 0600); err != nil { - return err - } - fmt.Printf("Secret File '%s' created with success!\n", filename) - return nil -} - -func extractNumber(filename string) int { - re := regexp.MustCompile(`\d+`) - match := re.FindString(filename) - if match != "" { - num, _ := strconv.Atoi(match) - return num - } - return 0 -} - -func checkTestNameForNumber(testName string, reqTestNumber int) bool { - if reqTestNumber == -1 { - return true - } - pattern := "_0*" + strconv.Itoa(reqTestNumber) + "($|[^0-9])" - matched, _ := regexp.MatchString(pattern, testName) - return matched -} - -func isSkipped(currAPI, testName string, globalTestNumber int, config *Config) bool { - apiFullName := config.Net + "/" + currAPI - apiFullTestName := config.Net + "/" + testName - - if (config.ReqTestNumber == -1 || config.TestingAPIs != "" || config.TestingAPIsWith != "") && - !(config.ReqTestNumber != -1 && (config.TestingAPIs != "" || config.TestingAPIsWith != "")) && - config.ExcludeAPIList == "" && config.ExcludeTestList == "" { - for _, currTestName := range apiNotCompared { - if strings.Contains(apiFullName, currTestName) { - return true - } - } - } - - if config.ExcludeAPIList != "" { - excludeAPIs := strings.Split(config.ExcludeAPIList, ",") - for _, excludeAPI := range excludeAPIs { - if strings.Contains(apiFullName, excludeAPI) || strings.Contains(apiFullTestName, excludeAPI) { - return true - } - } - } - - if config.ExcludeTestList != "" { - excludeTests := strings.Split(config.ExcludeTestList, ",") - for _, excludeTest := range excludeTests { - if excludeTest == strconv.Itoa(globalTestNumber) { - return true - } - } - } - - return false -} - -func verifyInLatestList(testName string, config *Config) bool { - apiFullTestName := config.Net + "/" + testName - if config.TestsOnLatestBlock { - for _, currTest := range testsOnLatest { - if strings.Contains(apiFullTestName, currTest) { - return true - } - } - 
} - return false -} - -func apiUnderTest(currAPI, testName string, config *Config) bool { - if config.TestingAPIsWith == "" && config.TestingAPIs == "" && !config.TestsOnLatestBlock { - return true - } - - if config.TestingAPIsWith != "" { - tests := strings.Split(config.TestingAPIsWith, ",") - for _, test := range tests { - if strings.Contains(currAPI, test) { - if config.TestsOnLatestBlock && verifyInLatestList(testName, config) { - return true - } - if config.TestsOnLatestBlock { - return false - } - return true - } - } - return false - } - - if config.TestingAPIs != "" { - tests := strings.Split(config.TestingAPIs, ",") - for _, test := range tests { - if test == currAPI { - if config.TestsOnLatestBlock && verifyInLatestList(testName, config) { - return true - } - if config.TestsOnLatestBlock { - return false - } - return true - } - } - return false - } - - if config.TestsOnLatestBlock { - return verifyInLatestList(testName, config) - } - - return false -} - -func dumpJSONs(dumpJSON bool, daemonFile, expRspFile, outputDir string, response, expectedResponse any, metrics *TestMetrics) error { - if !dumpJSON { - return nil - } +func runMain() int { + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - if err := os.MkdirAll(outputDir, 0755); err != nil { - return fmt.Errorf("Exception on makedirs: %s %v\n", outputDir, err) + cfg := config.NewConfig() + if err := parseFlags(cfg); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + usage() + return -1 } - if daemonFile != "" { - start := time.Now() - responseData, err := jsoniter.MarshalIndent(response, "", " ") + // CPU profiling + if cfg.CpuProfile != "" { + f, err := os.Create(cfg.CpuProfile) if err != nil { - return err + fmt.Fprintf(os.Stderr, "could not create CPU profile: %v\n", err) } - metrics.MarshallingTime += time.Since(start) - if err := os.WriteFile(daemonFile, responseData, 0644); err != nil { - return fmt.Errorf("Exception on file write daemon: %v\n", err) 
+ defer f.Close() + if err := pprof.StartCPUProfile(f); err != nil { + fmt.Fprintf(os.Stderr, "could not start CPU profile: %v\n", err) } + defer pprof.StopCPUProfile() } - if expRspFile != "" { - start := time.Now() - expectedResponseData, err := jsoniter.MarshalIndent(expectedResponse, "", " ") + // Execution tracing + if cfg.TraceFile != "" { + f, err := os.Create(cfg.TraceFile) if err != nil { - return err + fmt.Fprintf(os.Stderr, "could not create trace file: %v\n", err) } - metrics.MarshallingTime += time.Since(start) - if err := os.WriteFile(expRspFile, expectedResponseData, 0644); err != nil { - return fmt.Errorf("Exception on file write expected: %v\n", err) + defer f.Close() + if err := trace.Start(f); err != nil { + fmt.Fprintf(os.Stderr, "could not start trace: %v\n", err) } + defer trace.Stop() } - return nil -} - -const ( - identifierTag = "id" - jsonRpcTag = "jsonrpc" - resultTag = "result" - errorTag = "error" -) - -var ( - errJsonRpcUnexpectedFormat = errors.New("invalid JSON-RPC response format: neither object nor array") - errJsonRpcMissingVersion = errors.New("invalid JSON-RPC response: missing 'jsonrpc' field") - errJsonRpcMissingId = errors.New("invalid JSON-RPC response: missing 'id' field") - errJsonRpcNoncompliantVersion = errors.New("noncompliant JSON-RPC 2.0 version") - errJsonRpcMissingResultOrError = errors.New("JSON-RPC 2.0 response contains neither 'result' nor 'error'") - errJsonRpcContainsBothResultAndError = errors.New("JSON-RPC 2.0 response contains both 'result' and 'error'") -) - -// validateJsonRpcObject checks that the passed object is a valid JSON-RPC object, according to 2.0 spec. 
-// This implies that it must be a JSON object containing: -// - one mandatory "jsonrpc" field which must be equal to "2.0" -// - one mandatory "id" field which must match the value of the same field in the request -// https://www.jsonrpc.org/specification -func validateJsonRpcObject(object map[string]any) error { - // Ensure that the object is a valid JSON-RPC object. - jsonrpc, ok := object[jsonRpcTag] - if !ok { - return errJsonRpcMissingVersion - } - jsonrpcVersion, ok := jsonrpc.(string) - if jsonrpcVersion != "2.0" { - return errJsonRpcNoncompliantVersion - } - _, ok = object[identifierTag] - if !ok { - return errJsonRpcMissingId - } - return nil -} - -// validateJsonRpcResponseObject checks that the passed response is a valid JSON-RPC response, according to 2.0 spec. -// This implies that the response must be a valid JSON-RPC object plus: -// - either one "result" field in case of success or one "error" field otherwise, mutually exclusive -// The strict parameter relaxes the compliance requirements by allowing both 'result' and 'error' to be present -// TODO: strict parameter is required for corner cases in streaming mode when 'result' is emitted up-front -// https://www.jsonrpc.org/specification -func validateJsonRpcResponseObject(response map[string]any, strict bool) error { - // Ensure that the response is a valid JSON-RPC object. - err := validateJsonRpcObject(response) - if err != nil { - return err - } - _, hasResult := response[resultTag] - _, hasError := response[errorTag] - if strict && !hasResult && !hasError { - return errJsonRpcMissingResultOrError - } - if strict && hasResult && hasError { - return errJsonRpcContainsBothResultAndError - } - return nil -} -// validateJsonRpcResponse checks that the received response is a valid JSON-RPC message, according to 2.0 spec. -// This implies that the response must be either a valid JSON-RPC object, i.e. 
a JSON object containing at least -// "jsonrpc" and "id" fields or a JSON array where each element (if any) is in turn a valid JSON-RPC object. -func validateJsonRpcResponse(response any) error { - value := reflect.ValueOf(response) - if value.Kind() == reflect.Ptr { - value = value.Elem() - } - unwrappedResponse := value.Interface() - responseAsArray, isArray := (unwrappedResponse).([]any) - responseAsMap, isMap := (unwrappedResponse).(map[string]any) - if !isArray && !isMap { - return errJsonRpcUnexpectedFormat - } - if isMap { - // Ensure that the response is a valid JSON-RPC object. - err := validateJsonRpcResponseObject(responseAsMap, false) - if err != nil { - return err - } - } - if isArray { - for _, element := range responseAsArray { - elementAsMap, isElementMap := element.(map[string]any) - if !isElementMap { - return errJsonRpcUnexpectedFormat - } - err := validateJsonRpcResponseObject(elementAsMap, false) + // Memory profiling + defer func() { + if cfg.MemProfile != "" { + f, err := os.Create(cfg.MemProfile) if err != nil { - return err + fmt.Fprintf(os.Stderr, "could not create memory profile: %v\n", err) + } + defer f.Close() + runtime.GC() + if err := pprof.WriteHeapProfile(f); err != nil { + fmt.Fprintf(os.Stderr, "could not write memory profile: %v\n", err) } } - } - return nil -} - -func executeHttpRequest(ctx context.Context, config *Config, transportType, jwtAuth, target string, request []byte, response any, metrics *TestMetrics) error { - headers := map[string]string{ - "Content-Type": "application/json", - } - - if transportType != "http_comp" { - headers["Accept-Encoding"] = "Identity" - } - - if jwtAuth != "" { - headers["Authorization"] = jwtAuth - } - - targetURL := target - if transportType == "https" { - targetURL = "https://" + target - } else { - targetURL = "http://" + target - } - - client := &http.Client{ - Timeout: 300 * time.Second, - } - - req, err := http.NewRequestWithContext(ctx, "POST", targetURL, bytes.NewBuffer(request)) - 
if err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nhttp request creation fail: %s %v\n", targetURL, err) - } - return err - } - - for k, v := range headers { - req.Header.Set(k, v) - } - - start := time.Now() - resp, err := client.Do(req) - elapsed := time.Since(start) - metrics.RoundTripTime = elapsed - if config.VerboseLevel > 1 { - fmt.Printf("http round-trip time: %v\n", elapsed) - } - if err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nhttp connection fail: %s %v\n", targetURL, err) - } - return err - } - defer func(Body io.ReadCloser) { - err := Body.Close() - if err != nil { - fmt.Printf("\nfailed to close response body: %v\n", err) - } - }(resp.Body) - - if resp.StatusCode != http.StatusOK { - if config.VerboseLevel > 1 { - fmt.Printf("\npost result status_code: %d\n", resp.StatusCode) - } - return fmt.Errorf("http status %v", resp.Status) - } - - start = time.Now() - if err = jsoniter.NewDecoder(resp.Body).Decode(response); err != nil { - return fmt.Errorf("cannot decode http body as json %w", err) - } - metrics.UnmarshallingTime += time.Since(start) - if err = validateJsonRpcResponse(response); err != nil { // TODO: improve or remove (casts as well) - return fmt.Errorf("json response in invalid: %w", err) - } - - if config.VerboseLevel > 1 { - raw, _ := jsoniter.Marshal(response) - fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, request, string(raw)) - } - - return nil -} - -type JsonRpcRequest struct { - Jsonrpc string `json:"jsonrpc"` - Method string `json:"method"` - Params []interface{} `json:"params"` - Id int `json:"id"` -} - -type JsonRpcResponse struct { - Result string `json:"result"` - Error *struct { - Message string `json:"message"` - } `json:"error"` -} - -func getLatestBlockNumber(ctx context.Context, config *Config, url string, metrics *TestMetrics) (uint64, error) { - request := JsonRpcRequest{ - Jsonrpc: "2.0", - Method: "eth_blockNumber", - Params: []interface{}{}, - Id: 1, - } - requestBytes, _ := 
jsoniter.Marshal(request) - - var response any - err := executeHttpRequest(ctx, config, "http", "", url, requestBytes, &response, metrics) - if err != nil { - return 0, err - } + }() - responseMap, ok := response.(map[string]interface{}) - if !ok { - return 0, fmt.Errorf("response is not a map: %v", response) - } - if resultVal, hasResult := responseMap["result"]; hasResult { - resultStr, isString := resultVal.(string) - if !isString { - return 0, fmt.Errorf("result is not a string: %v", resultVal) + // Clean temp dirs + if _, err := os.Stat(config.TempDirName); err == nil { + if err := os.RemoveAll(config.TempDirName); err != nil { + return -1 } - - cleanHex := strings.TrimPrefix(resultStr, "0x") - return strconv.ParseUint(cleanHex, 16, 64) - } - if errorVal, hasError := responseMap["error"]; hasError { - return 0, fmt.Errorf("RPC error: %v", errorVal) } - return 0, fmt.Errorf("no result or error found in response") -} - -func getConsistentLatestBlock(config *Config, server1URL, server2URL string, maxRetries int, retryDelay time.Duration) (uint64, error) { - var bn1, bn2 uint64 - metrics := TestMetrics{} - for i := 0; i < maxRetries; i++ { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - - var err1, err2 error - bn1, err1 = getLatestBlockNumber(ctx, config, server1URL, &metrics) - bn2, err2 = getLatestBlockNumber(ctx, config, server2URL, &metrics) - cancel() - - if config.VerboseLevel > 1 { - fmt.Printf("retry: %d nodes: %s, %s latest blocks: %d, %d\n", i+1, server1URL, server2URL, bn1, bn2) - } + ctx, cancelCtx := context.WithCancel(context.Background()) - if err1 == nil && err2 == nil && bn1 == bn2 { - return bn1, nil + go func() { + for { + select { + case sig := <-sigs: + fmt.Printf("\nReceived signal: %s. 
Starting graceful shutdown...\n", sig) + cancelCtx() + case <-ctx.Done(): + return + } } + }() - if i < maxRetries-1 { - time.Sleep(retryDelay) + defer func() { + if r := recover(); r != nil { + fmt.Println("\nCRITICAL: TEST SEQUENCE INTERRUPTED!") } - } - - return 0, fmt.Errorf("nodes not synced, last values: %d / %d", bn1, bn2) -} - -func executeWebSocketRequest(config *Config, transportType, jwtAuth, target string, request []byte, response any, metrics *TestMetrics) error { - wsTarget := "ws://" + target - dialer := websocket.Dialer{ - HandshakeTimeout: 300 * time.Second, - EnableCompression: strings.HasSuffix(transportType, "_comp"), - } - - headers := http.Header{} - if jwtAuth != "" { - headers.Set("Authorization", jwtAuth) - } + }() - conn, _, err := dialer.Dial(wsTarget, headers) + exitCode, err := runner.Run(ctx, cancelCtx, cfg) if err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nwebsocket connection fail: %v\n", err) - } - return err - } - defer func(conn *websocket.Conn) { - err := conn.Close() - if err != nil { - fmt.Printf("\nfailed to close websocket connection: %v\n", err) - } - }(conn) - - start := time.Now() - if err = conn.WriteMessage(websocket.BinaryMessage, request); err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nwebsocket write fail: %v\n", err) - } - return err - } - - _, message, err := conn.NextReader() - if err != nil { - if config.VerboseLevel > 0 { - fmt.Printf("\nwebsocket read fail: %v\n", err) - } - return err - } - metrics.RoundTripTime = time.Since(start) - - start = time.Now() - if err = jsoniter.NewDecoder(message).Decode(&response); err != nil { - return fmt.Errorf("cannot decode websocket message as json %w", err) - } - metrics.UnmarshallingTime += time.Since(start) - if err = validateJsonRpcResponse(response); err != nil { // TODO: improve or remove (casts as well) - return fmt.Errorf("json response in invalid %w", err) - } - - if config.VerboseLevel > 1 { - raw, _ := jsoniter.Marshal(response) - 
fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, request, string(raw)) - } - - return nil -} - -func executeRequest(ctx context.Context, config *Config, transportType, jwtAuth, target string, request []byte, response any, metrics *TestMetrics) error { - if strings.HasPrefix(transportType, "http") { - return executeHttpRequest(ctx, config, transportType, jwtAuth, target, request, response, metrics) - } - return executeWebSocketRequest(config, transportType, jwtAuth, target, request, response, metrics) -} - -func runCompare(jsonDiff bool, errorFile, tempFile1, tempFile2, diffFile string) bool { - var cmd *exec.Cmd - alreadyFailed := false - - if jsonDiff { - // Check if json-diff is available - checkCmd := exec.Command("json-diff", "--help") - if err := checkCmd.Run(); err != nil { - jsonDiff = false - } - } - - if jsonDiff { - cmd = exec.Command("sh", "-c", fmt.Sprintf("json-diff -s %s %s > %s 2> %s", tempFile2, tempFile1, diffFile, errorFile)) - alreadyFailed = false - } else { - cmd = exec.Command("sh", "-c", fmt.Sprintf("diff %s %s > %s 2> %s", tempFile2, tempFile1, diffFile, errorFile)) - alreadyFailed = true - } - - if err := cmd.Start(); err != nil { - return false - } - - done := make(chan error) - go func() { - done <- cmd.Wait() - }() - - timeout := time.After(time.Duration(MaxTime) * TimeInterval) - ticker := time.NewTicker(TimeInterval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - // Check if the process is still running - continue - case err := <-done: - // Process completed - if err != nil { - // Non-zero exit, which is expected for diff when files differ - } - - // Check error file size - fileInfo, err := os.Stat(errorFile) - if err == nil && fileInfo.Size() != 0 { - if alreadyFailed { - return false - } - // Try with diff instead - alreadyFailed = true - cmd = exec.Command("sh", "-c", fmt.Sprintf("diff %s %s > %s 2> %s", tempFile2, tempFile1, diffFile, errorFile)) - if err := cmd.Start(); err != nil { - return false - } - 
go func() { - done <- cmd.Wait() - }() - continue - } - return true - case <-timeout: - // Timeout reached, kill the process - if cmd.Process != nil { - err := cmd.Process.Kill() - if err != nil { - return false - } - } - if alreadyFailed { - return false - } - // Try with diff instead - alreadyFailed = true - cmd = exec.Command("sh", "-c", fmt.Sprintf("diff %s %s > %s 2> %s", tempFile2, tempFile1, diffFile, errorFile)) - if err := cmd.Start(); err != nil { - return false - } - go func() { - done <- cmd.Wait() - }() - timeout = time.After(time.Duration(MaxTime) * TimeInterval) - } - } -} - -var ( - errDiffTimeout = errors.New("diff timeout") - errDiffMismatch = errors.New("diff mismatch") -) - -func isArchive(jsonFilename string) bool { - // Treat all files except .json as potential archive files - return !strings.HasSuffix(jsonFilename, ".json") -} - -func extractJsonCommands(jsonFilename string, sanitizeExtension bool, metrics *TestMetrics) ([]JsonRpcCommand, error) { - var jsonrpcCommands []JsonRpcCommand - err := archive.Extract(jsonFilename, sanitizeExtension, func(reader *tar.Reader) error { - bufferedReader := bufio.NewReaderSize(reader, 8*os.Getpagesize()) - start := time.Now() - if err := jsoniter.NewDecoder(bufferedReader).Decode(&jsonrpcCommands); err != nil { - return fmt.Errorf("failed to decode JSON: %w", err) - } - metrics.UnmarshallingTime += time.Since(start) - return nil - }) - if err != nil { - return nil, errors.New("cannot extract archive file " + jsonFilename) - } - return jsonrpcCommands, nil -} - -func readJsonCommands(jsonFilename string, metrics *TestMetrics) ([]JsonRpcCommand, error) { - file, err := os.Open(jsonFilename) - if err != nil { - return nil, fmt.Errorf("cannot open file %s: %w", jsonFilename, err) - } - defer func(file *os.File) { - err = file.Close() - if err != nil { - fmt.Printf("failed to close file %s: %v\n", jsonFilename, err) - } - }(file) - - reader := bufio.NewReaderSize(file, 8*os.Getpagesize()) - - var 
jsonrpcCommands []JsonRpcCommand - start := time.Now() - if err := jsoniter.NewDecoder(reader).Decode(&jsonrpcCommands); err != nil { - return nil, fmt.Errorf("cannot parse JSON %s: %w", jsonFilename, err) - } - metrics.UnmarshallingTime += time.Since(start) - return jsonrpcCommands, nil -} - -func (c *JsonRpcCommand) compareJSONFiles(kind JsonDiffKind, errorFileName, fileName1, fileName2, diffFileName string) (bool, error) { - switch kind { - case JdLibrary: - if success, err := c.runCompareJD(fileName1, fileName2, diffFileName); !success { - return false, fmt.Errorf("failed to compare %s and %s using jd command %s", fileName1, fileName2, err) - } - return true, nil - case JsonDiffTool: - if success := runCompare(true, errorFileName, fileName1, fileName2, diffFileName); !success { - return false, fmt.Errorf("failed to compare %s and %s using json-diff command", fileName1, fileName2) - } - return true, nil - case DiffTool: - if success := runCompare(false, errorFileName, fileName1, fileName2, diffFileName); !success { - return false, fmt.Errorf("failed to compare %s and %s using diff command", fileName1, fileName2) - } - return true, nil - default: - return false, fmt.Errorf("unknown JSON diff kind: %d", kind) - } -} - -func (c *JsonRpcCommand) runCompareJD(fileName1, fileName2, diffFileName string) (bool, error) { - jsonNode1, err := jd.ReadJsonFile(fileName1) - if err != nil { - return false, err - } - jsonNode2, err := jd.ReadJsonFile(fileName2) - if err != nil { - return false, err - } - - type result struct { - diff jd.Diff - err error - } - - resChan := make(chan result, 1) - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - go func() { - var d jd.Diff - var e error - - if c.TestInfo != nil && c.TestInfo.Metadata != nil && c.TestInfo.Metadata.Response != nil { - if c.TestInfo.Metadata.Response.PathOptions != nil { - pathOptions := c.TestInfo.Metadata.Response.PathOptions - options, err := 
jd.ReadOptionsString(string(pathOptions)) - if err != nil { - resChan <- result{err: err} - return - } - d = jsonNode1.Diff(jsonNode2, options...) - } else { - d = jsonNode1.Diff(jsonNode2) - } - } else { - d = jsonNode1.Diff(jsonNode2) - } - - resChan <- result{diff: d, err: e} - }() - - select { - case <-ctx.Done(): - return false, fmt.Errorf("JSON diff (JD) timeout: operation exceeded timeout for files %s and %s", fileName1, fileName2) - - case res := <-resChan: - if res.err != nil { - return false, res.err - } - - diffString := res.diff.Render() - err = os.WriteFile(diffFileName, []byte(diffString), 0644) - if err != nil { - return false, err - } - return true, nil - } -} - -func (c *JsonRpcCommand) compareJSON(config *Config, daemonFile, expRspFile, diffFile string, metrics *TestMetrics) (bool, error) { - metrics.ComparisonCount += 1 - - diffFileSize := int64(0) - diffResult, err := c.compareJSONFiles(config.DiffKind, "/dev/null", expRspFile, daemonFile, diffFile) - if diffResult { - fileInfo, err := os.Stat(diffFile) - if err != nil { - return false, err - } - diffFileSize = fileInfo.Size() - } - - if diffFileSize != 0 || !diffResult { - if !diffResult { - err = errDiffTimeout - } else { - err = errDiffMismatch - } - return false, err - } - - return true, nil -} - -func (c *JsonRpcCommand) processResponse(response, result1, responseInFile any, config *Config, outputDir, daemonFile, expRspFile, diffFile string, outcome *TestOutcome) { - var expectedResponse any - if result1 != nil { - expectedResponse = result1 - } else { - expectedResponse = responseInFile - } - - if config.WithoutCompareResults { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) - if err != nil { - outcome.Error = err - return - } - outcome.Success = true - return - } - - mapsEqual := func(lhs, rhs map[string]interface{}) bool { - if len(lhs) != len(rhs) { - return false - } - for k, lv := range lhs { - rv, ok := rhs[k] 
- if !ok || !reflect.DeepEqual(lv, rv) { - return false - } - } - return true - } - arrayEqual := func(lhs, rhs []map[string]interface{}) bool { - if len(lhs) != len(rhs) { - return false - } - for i := 0; i < len(lhs); i++ { - if !mapsEqual(lhs[i], rhs[i]) { - return false - } - } - return true - } - compareResponses := func(lhs, rhs any) bool { - leftMap, leftIsMap := lhs.(map[string]interface{}) - rightMap, rightIsMap := rhs.(map[string]interface{}) - if leftIsMap && rightIsMap { - return mapsEqual(leftMap, rightMap) - } - leftArray, leftIsArray := lhs.([]map[string]interface{}) - rightArray, rightIsArray := rhs.([]map[string]interface{}) - if leftIsArray && rightIsArray { - return arrayEqual(leftArray, rightArray) - } - return reflect.DeepEqual(lhs, rhs) - } - // Fast path: if actual/expected are identical, no need to compare them - if compareResponses(response, expectedResponse) { - outcome.Metrics.EqualCount += 1 - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) - if err != nil { - outcome.Error = err - return - } - outcome.Success = true - return - } - - // Check various conditions where we don't care about differences - responseMap, respIsMap := response.(map[string]interface{}) // TODO: remove redundant casts - expectedMap, expIsMap := expectedResponse.(map[string]interface{}) // TODO: remove redundant casts - if respIsMap && expIsMap { // TODO: extract function ignoreDifferences and handle JSON batch responses - _, responseHasResult := responseMap["result"] - expectedResult, expectedHasResult := expectedMap["result"] - _, responseHasError := responseMap["error"] - expectedError, expectedHasError := expectedMap["error"] - if responseHasResult && expectedHasResult && expectedResult == nil && result1 == nil { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) - if err != nil { - outcome.Error = err - return - } - 
outcome.Success = true - return - } - if responseHasError && expectedHasError && expectedError == nil { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) - if err != nil { - outcome.Error = err - return - } - outcome.Success = true - return - } - // TODO: improve len(expectedMap) == 2 which means: just "jsonrpc" and "id" are expected - if !expectedHasResult && !expectedHasError && len(expectedMap) == 2 { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) - if err != nil { - outcome.Error = err - return - } - outcome.Success = true - return - } - if responseHasError && expectedHasError && config.DoNotCompareError { - err := dumpJSONs(config.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) - if err != nil { - outcome.Error = err - return - } - outcome.Success = true - return - } - } - - // We need to compare the response and expectedResponse, so we dump them to files first - err := dumpJSONs(true, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) - if err != nil { - outcome.Error = err - return - } - - var same bool - if config.DiffKind == JsonDiffGo { // TODO: move within compareJSON - outcome.Metrics.ComparisonCount += 1 - opts := &jsondiff.Options{ - SortArrays: true, - } - if respIsMap && expIsMap { - diff := jsondiff.DiffJSON(expectedMap, responseMap, opts) - same = len(diff) == 0 - diffString := jsondiff.DiffString(expectedMap, responseMap, opts) - err = os.WriteFile(diffFile, []byte(diffString), 0644) - if err != nil { - outcome.Error = err - return - } - if !same { - outcome.Error = errDiffMismatch - if config.ReqTestNumber != -1 { // only when a single test is run TODO: add option to control it - outcome.ColoredDiff = jsondiff.ColoredString(expectedMap, responseMap, opts) - } - } - } else { - responseArray, respIsArray := response.([]any) 
- expectedArray, expIsArray := expectedResponse.([]any) - if !respIsArray || !expIsArray { - outcome.Error = errors.New("cannot compare JSON objects (neither maps nor arrays)") - return - } - diff := jsondiff.DiffJSON(expectedArray, responseArray, opts) - same = len(diff) == 0 - diffString := jsondiff.DiffString(expectedArray, responseArray, opts) - err = os.WriteFile(diffFile, []byte(diffString), 0644) - if err != nil { - outcome.Error = err - return - } - if !same { - outcome.Error = errDiffMismatch - if config.ReqTestNumber != -1 { // only when a single test is run TODO: add option to control it - outcome.ColoredDiff = jsondiff.ColoredString(expectedArray, responseArray, opts) - } - } - } - } else { - same, err = c.compareJSON(config, daemonFile, expRspFile, diffFile, &outcome.Metrics) - if err != nil { - outcome.Error = err - return - } - } - - if same && !config.ForceDumpJSONs { - err := os.Remove(daemonFile) - if err != nil { - outcome.Error = err - return - } - err = os.Remove(expRspFile) - if err != nil { - outcome.Error = err - return - } - err = os.Remove(diffFile) - if err != nil { - outcome.Error = err - return - } - } - - outcome.Success = same -} - -func (c *JsonRpcCommand) run(ctx context.Context, config *Config, descriptor *TestDescriptor, outcome *TestOutcome) { - transportType := descriptor.TransportType - jsonFile := descriptor.Name - request := c.Request - - target := getTarget(config.DaemonUnderTest, descriptor.Name, config) - target1 := "" - - var jwtAuth string - if config.JWTSecret != "" { - secretBytes, _ := hex.DecodeString(config.JWTSecret) - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "iat": time.Now().Unix(), - }) - tokenString, _ := token.SignedString(secretBytes) - jwtAuth = "Bearer " + tokenString - } - - outputAPIFilename := filepath.Join(config.OutputDir, strings.TrimSuffix(jsonFile, filepath.Ext(jsonFile))) - outputDirName := filepath.Dir(outputAPIFilename) - diffFile := outputAPIFilename + "-diff.json" - - 
if !config.VerifyWithDaemon { - var result any - err := executeRequest(ctx, config, transportType, jwtAuth, target, request, &result, &outcome.Metrics) - if err != nil { - outcome.Error = err - return - } - if config.VerboseLevel > 2 { - fmt.Printf("%s: [%v]\n", config.DaemonUnderTest, result) - } - - responseInFile := c.Response - daemonFile := outputAPIFilename + "-response.json" - expRspFile := outputAPIFilename + "-expResponse.json" - - c.processResponse(result, nil, responseInFile, config, outputDirName, daemonFile, expRspFile, diffFile, outcome) - } else { - target = getTarget(DaemonOnDefaultPort, descriptor.Name, config) - var result any - err := executeRequest(ctx, config, transportType, jwtAuth, target, request, &result, &outcome.Metrics) - if err != nil { - outcome.Error = err - return - } - if config.VerboseLevel > 2 { - fmt.Printf("%s: [%v]\n", config.DaemonUnderTest, result) - } - - target1 = getTarget(config.DaemonAsReference, descriptor.Name, config) - var result1 any - err = executeRequest(ctx, config, transportType, jwtAuth, target1, request, &result1, &outcome.Metrics) - if err != nil { - outcome.Error = err - return - } - if config.VerboseLevel > 2 { - fmt.Printf("%s: [%v]\n", config.DaemonAsReference, result1) - } - - daemonFile := outputAPIFilename + getJSONFilenameExt(DaemonOnDefaultPort, target) - expRspFile := outputAPIFilename + getJSONFilenameExt(config.DaemonAsReference, target1) - - c.processResponse(result, result1, nil, config, outputDirName, daemonFile, expRspFile, diffFile, outcome) - return - } -} - -func runTest(ctx context.Context, descriptor *TestDescriptor, config *Config) TestOutcome { - jsonFilename := filepath.Join(config.JSONDir, descriptor.Name) - - outcome := TestOutcome{} - - var jsonrpcCommands []JsonRpcCommand - var err error - if isArchive(jsonFilename) { - jsonrpcCommands, err = extractJsonCommands(jsonFilename, config.SanitizeArchiveExt, &outcome.Metrics) - } else { - jsonrpcCommands, err = 
readJsonCommands(jsonFilename, &outcome.Metrics) - } - if err != nil { - outcome.Error = err - return outcome - } - - if len(jsonrpcCommands) != 1 { - outcome.Error = errors.New("expected exactly one JSON RPC command in " + jsonFilename) - return outcome - } - - jsonrpcCommands[0].run(ctx, config, descriptor, &outcome) - - return outcome -} - -func mustAtoi(s string) int { - if s == "" { - return 0 - } - n, _ := strconv.Atoi(s) - return n -} - -type ResultCollector struct { - resultsChan chan chan TestResult - config *Config - successTests int - failedTests int - executedTests int - - totalRoundTripTime time.Duration - totalMarshallingTime time.Duration - totalUnmarshallingTime time.Duration - totalComparisonCount int - totalEqualCount int -} - -func newResultCollector(resultsChan chan chan TestResult, config *Config) *ResultCollector { - return &ResultCollector{resultsChan: resultsChan, config: config} -} - -func (c *ResultCollector) start(ctx context.Context, cancelCtx context.CancelFunc, resultsWg *sync.WaitGroup) { - go func() { - defer resultsWg.Done() - for { - select { - case testResultCh := <-c.resultsChan: - if testResultCh == nil { - return - } - select { - case result := <-testResultCh: - file := fmt.Sprintf("%-60s", result.Test.Name) - tt := fmt.Sprintf("%-15s", result.Test.TransportType) - fmt.Printf("%04d. 
%s::%s ", result.Test.Number, tt, file) - - if result.Outcome.Success { - c.successTests++ - if c.config.VerboseLevel > 0 { - fmt.Println("OK") - } else { - fmt.Print("OK\r") - } - c.totalRoundTripTime += result.Outcome.Metrics.RoundTripTime - c.totalMarshallingTime += result.Outcome.Metrics.MarshallingTime - c.totalUnmarshallingTime += result.Outcome.Metrics.UnmarshallingTime - c.totalComparisonCount += result.Outcome.Metrics.ComparisonCount - c.totalEqualCount += result.Outcome.Metrics.EqualCount - } else { - c.failedTests++ - if result.Outcome.Error != nil { - fmt.Printf("failed: %s\n", result.Outcome.Error.Error()) - if errors.Is(result.Outcome.Error, errDiffMismatch) && result.Outcome.ColoredDiff != "" { - fmt.Printf(result.Outcome.ColoredDiff) - } - } else { - fmt.Printf("failed: no error\n") - } - if c.config.ExitOnFail { - // Signal other tasks to stop and exit - cancelCtx() - return - } - } - c.executedTests++ - case <-ctx.Done(): - return - } - case <-ctx.Done(): - return - } - } - }() -} - -func runMain() int { - // Create a channel to receive OS signals and register for clean termination signals. 
- sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - - // Parse command line arguments - config := NewConfig() - if err := config.parseFlags(); err != nil { - _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err) - usage() + fmt.Fprintf(os.Stderr, "Error: %v\n", err) return -1 } - - // Handle embedded CPU/memory profiling and execution tracing - if config.CpuProfile != "" { - f, err := os.Create(config.CpuProfile) - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "could not create CPU profile: %v\n", err) - } - defer func(f *os.File) { - err := f.Close() - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "could not close CPU profile: %v\n", err) - } - }(f) - if err := pprof.StartCPUProfile(f); err != nil { - _, _ = fmt.Fprintf(os.Stderr, "could not start CPU profile: %v\n", err) - } - defer pprof.StopCPUProfile() - } - - if config.TraceFile != "" { - f, err := os.Create(config.TraceFile) - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "could not create trace file: %v\n", err) - } - defer func(f *os.File) { - err := f.Close() - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "could not close trace file: %v\n", err) - } - }(f) - if err := trace.Start(f); err != nil { - _, _ = fmt.Fprintf(os.Stderr, "could not start trace: %v\n", err) - } - defer trace.Stop() - } - - defer func() { - if config.MemProfile != "" { - f, err := os.Create(config.MemProfile) - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "could not create memory profile: %v\n", err) - } - defer func(f *os.File) { - err := f.Close() - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "could not close memory profile: %v\n", err) - } - }(f) - runtime.GC() // get up-to-date statistics - if err := pprof.WriteHeapProfile(f); err != nil { - _, _ = fmt.Fprintf(os.Stderr, "could not write memory profile: %v\n", err) - } - } - }() - - // Clean temp dirs if exists // TODO: use OS temp dir? 
- if _, err := os.Stat(TempDirname); err == nil { - err := os.RemoveAll(TempDirname) - if err != nil { - return -1 - } - } - - startTime := time.Now() - err := os.MkdirAll(config.OutputDir, 0755) - if err != nil { - return -1 - } - - scheduledTests := 0 - skippedTests := 0 - - var serverEndpoints string - if config.VerifyWithDaemon { - if config.DaemonAsReference == ExternalProvider { - serverEndpoints = "both servers (rpcdaemon with " + config.ExternalProviderURL + ")" - } else { - serverEndpoints = "both servers (rpcdaemon with " + config.DaemonUnderTest + ")" - } - } else { - target := getTarget(config.DaemonUnderTest, "eth_call", config) - target1 := getTarget(config.DaemonUnderTest, "engine_", config) - serverEndpoints = target + "/" + target1 - } - - if config.Parallel { - fmt.Printf("Run tests in parallel on %s\n", serverEndpoints) - } else { - fmt.Printf("Run tests in serial on %s\n", serverEndpoints) - } - - if strings.Contains(config.TransportType, "_comp") { - fmt.Println("Run tests using compression") - } - - if config.VerifyWithDaemon && config.TestsOnLatestBlock { - var server1 = fmt.Sprintf("%s:%d", config.DaemonOnHost, config.ServerPort) - var maxRetries = 10 - var retryDelay = 1 * time.Second - latestBlock, err := getConsistentLatestBlock(config, server1, config.ExternalProviderURL, maxRetries, retryDelay) - if err != nil { - fmt.Println("sync on latest block number failed ", err) - return -1 // TODO: unique return codes? 
- } - if config.VerboseLevel > 0 { - fmt.Printf("Latest block number for %s, %s: %d\n", server1, config.ExternalProviderURL, latestBlock) - } - } - - resultsAbsoluteDir, err := filepath.Abs(config.ResultsDir) - if err != nil { - return -1 - } - fmt.Printf("Result directory: %s\n", resultsAbsoluteDir) - - globalTestNumber := 0 - availableTestedAPIs := 0 - testRep := 0 - - // Worker pool for parallel execution - var wg sync.WaitGroup - testsChan := make(chan *TestDescriptor, 2000) - resultsChan := make(chan chan TestResult, 2000) - - numWorkers := 1 - if config.Parallel { - numWorkers = runtime.NumCPU() - } - - ctx, cancelCtx := context.WithCancel(context.Background()) - - // Start workers - for i := 0; i < numWorkers; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case test := <-testsChan: - if test == nil { - return - } - testOutcome := runTest(ctx, test, config) - test.ResultChan <- TestResult{Outcome: testOutcome, Test: test} - case <-ctx.Done(): - return - } - } - }() - } - - // Results collector - var resultsWg sync.WaitGroup - resultsWg.Add(1) - resultsCollector := newResultCollector(resultsChan, config) - resultsCollector.start(ctx, cancelCtx, &resultsWg) - - go func() { - for { - select { - case sig := <-sigs: - fmt.Printf("\nReceived signal: %s. 
Starting graceful shutdown...\n", sig) - cancelCtx() - case <-ctx.Done(): - return - } - } - }() - - defer func() { - if r := recover(); r != nil { - fmt.Println("\nCRITICAL: TEST SEQUENCE INTERRUPTED!") - } - }() - - for testRep = 0; testRep < config.LoopNumber; testRep++ { - select { - case <-ctx.Done(): - break - default: - } - - if config.LoopNumber != 1 { - fmt.Printf("\nTest iteration: %d\n", testRep+1) - } - - transportTypes := strings.Split(config.TransportType, ",") - for _, transportType := range transportTypes { - select { - case <-ctx.Done(): - break - default: - } - - testNumberInAnyLoop := 1 - - dirs, err := os.ReadDir(config.JSONDir) - if err != nil { - _, err := fmt.Fprintf(os.Stderr, "Error reading directory %s: %v\n", config.JSONDir, err) - if err != nil { - return -1 - } - continue - } - - // Sort directories - sort.Slice(dirs, func(i, j int) bool { - return dirs[i].Name() < dirs[j].Name() - }) - - globalTestNumber = 0 - availableTestedAPIs = 0 - - for _, currAPIEntry := range dirs { - select { - case <-ctx.Done(): - break - default: - } - - currAPI := currAPIEntry.Name() - - // Skip results folder and hidden folders - if currAPI == config.ResultsDir || strings.HasPrefix(currAPI, ".") { - continue - } - - testDir := filepath.Join(config.JSONDir, currAPI) - info, err := os.Stat(testDir) - if err != nil || !info.IsDir() { - continue - } - - availableTestedAPIs++ - - testEntries, err := os.ReadDir(testDir) - if err != nil { - continue - } - - // Sort test files by number - sort.Slice(testEntries, func(i, j int) bool { - return extractNumber(testEntries[i].Name()) < extractNumber(testEntries[j].Name()) - }) - - testNumber := 1 - for _, testEntry := range testEntries { - select { - case <-ctx.Done(): - break - default: - } - - testName := testEntry.Name() - - if !strings.HasPrefix(testName, "test_") { - continue - } - - ext := filepath.Ext(testName) - if ext != ".zip" && ext != ".gzip" && ext != ".json" && ext != ".tar" { - continue - } - - 
jsonTestFullName := filepath.Join(currAPI, testName) - - if apiUnderTest(currAPI, jsonTestFullName, config) { - if isSkipped(currAPI, jsonTestFullName, testNumberInAnyLoop, config) { - if config.StartTest == "" || testNumberInAnyLoop >= mustAtoi(config.StartTest) { - if !config.DisplayOnlyFail && config.ReqTestNumber == -1 { - file := fmt.Sprintf("%-60s", jsonTestFullName) - tt := fmt.Sprintf("%-15s", transportType) - fmt.Printf("%04d. %s::%s skipped\n", testNumberInAnyLoop, tt, file) - } - skippedTests++ - } - } else { - shouldRun := false - if config.TestingAPIsWith == "" && config.TestingAPIs == "" && (config.ReqTestNumber == -1 || config.ReqTestNumber == testNumberInAnyLoop) { - shouldRun = true - } else if config.TestingAPIsWith != "" && checkTestNameForNumber(testName, config.ReqTestNumber) { - shouldRun = true - } else if config.TestingAPIs != "" && checkTestNameForNumber(testName, config.ReqTestNumber) { - shouldRun = true - } - - if shouldRun && (config.StartTest == "" || testNumberInAnyLoop >= mustAtoi(config.StartTest)) { - testDesc := &TestDescriptor{ - Name: jsonTestFullName, - Number: testNumberInAnyLoop, - TransportType: transportType, - ResultChan: make(chan TestResult, 1), - } - select { - case <-ctx.Done(): - return -1 - case resultsChan <- testDesc.ResultChan: - } - select { - case <-ctx.Done(): - return -1 - case testsChan <- testDesc: - } - scheduledTests++ - - if config.WaitingTime > 0 { - time.Sleep(time.Duration(config.WaitingTime) * time.Millisecond) - } - } - } - } - - globalTestNumber++ - testNumberInAnyLoop++ - testNumber++ - } - } - } - } - - // Close channels and wait for completion - close(testsChan) - wg.Wait() - close(resultsChan) - resultsWg.Wait() - - if scheduledTests == 0 && config.TestingAPIsWith != "" { - fmt.Printf("WARN: API filter %s selected no tests\n", config.TestingAPIsWith) - } - - if config.ExitOnFail && resultsCollector.failedTests > 0 { - fmt.Println("WARN: test sequence interrupted by failure (ExitOnFail)") - } - - 
// Clean empty subfolders in the output dir - if entries, err := os.ReadDir(config.OutputDir); err == nil { - for _, entry := range entries { - if !entry.IsDir() { - continue - } - outputSubfolder := filepath.Join(config.OutputDir, entry.Name()) - if subEntries, err := os.ReadDir(outputSubfolder); err == nil && len(subEntries) == 0 { - err := os.Remove(outputSubfolder) - if err != nil { - fmt.Printf("WARN: clean failed %v\n", err) - } - } - } - } - - // Clean temp dir - err = os.RemoveAll(TempDirname) - if err != nil { - return -1 - } - - // Print results - elapsed := time.Since(startTime) - fmt.Println("\n ") - fmt.Printf("Total HTTP round-trip time: %v\n", resultsCollector.totalRoundTripTime) - fmt.Printf("Total Marshalling time: %v\n", resultsCollector.totalMarshallingTime) - fmt.Printf("Total Unmarshalling time: %v\n", resultsCollector.totalUnmarshallingTime) - fmt.Printf("Total Comparison count: %v\n", resultsCollector.totalComparisonCount) - fmt.Printf("Total Equal count: %v\n", resultsCollector.totalEqualCount) - fmt.Printf("Test session duration: %v\n", elapsed) - fmt.Printf("Test session iterations: %d\n", testRep) - fmt.Printf("Test suite total APIs: %d\n", availableTestedAPIs) - fmt.Printf("Test suite total tests: %d\n", globalTestNumber) - fmt.Printf("Number of skipped tests: %d\n", skippedTests) - fmt.Printf("Number of selected tests: %d\n", scheduledTests) - fmt.Printf("Number of executed tests: %d\n", resultsCollector.executedTests) - fmt.Printf("Number of success tests: %d\n", resultsCollector.successTests) - fmt.Printf("Number of failed tests: %d\n", resultsCollector.failedTests) - - if resultsCollector.failedTests > 0 { - return 1 - } - return 0 + return exitCode } func main() { - exitCode := runMain() - os.Exit(exitCode) + os.Exit(runMain()) } diff --git a/cmd/perf/main.go b/cmd/perf/main.go index b32195e4..9b55d8a5 100644 --- a/cmd/perf/main.go +++ b/cmd/perf/main.go @@ -1,1729 +1,44 @@ package main import ( - "archive/tar" - "bufio" - "bytes" - 
"compress/bzip2" - "compress/gzip" "context" - "encoding/csv" - "encoding/json" - "errors" "fmt" - "io" "log" - "net/http" "os" - "os/exec" - "os/user" - "path/filepath" - "runtime" - "strconv" - "strings" - "time" - vegeta "github.com/tsenart/vegeta/v12/lib" + "github.com/erigontech/rpc-tests/internal/perf" "github.com/urfave/cli/v2" ) -const ( - DefaultTestSequence = "50:30,1000:30,2500:20,10000:20" - DefaultRepetitions = 10 - DefaultVegetaPatternTarFile = "" - DefaultClientVegetaOnCore = "-:-" - DefaultServerAddress = "localhost" - DefaultWaitingTime = 5 - DefaultMaxConn = "9000" - DefaultTestType = "eth_getLogs" - DefaultVegetaResponseTimeout = "300s" - DefaultMaxBodyRsp = "1500" - DefaultClientName = "rpcdaemon" - DefaultClientBuildDir = "" - - BinaryDir = "bin" -) - -var ( - RunTestDirname string - VegetaPatternDirname string - VegetaReport string - VegetaTarFileName string - VegetaPatternBase string -) - -func init() { - // Generate a random directory name - timestamp := time.Now().UnixNano() - RunTestDirname = fmt.Sprintf("/tmp/run_tests_%d", timestamp) - VegetaPatternDirname = RunTestDirname + "/erigon_stress_test" - VegetaReport = RunTestDirname + "/vegeta_report.hrd" - VegetaTarFileName = RunTestDirname + "/vegeta_TAR_File" - VegetaPatternBase = VegetaPatternDirname + "/vegeta_erigon_" -} - -// Config holds all configuration for the performance test -type Config struct { - VegetaPatternTarFile string - ClientVegetaOnCore string - ClientBuildDir string - Repetitions int - TestSequence string - ClientAddress string - TestType string - TestingClient string - WaitingTime int - VersionedTestReport bool - Verbose bool - MacConnection bool - CheckServerAlive bool - Tracing bool - EmptyCache bool - CreateTestReport bool - MaxConnection string - VegetaResponseTimeout string - MaxBodyRsp string - JSONReportFile string - BinaryFileFullPathname string - BinaryFile string - ChainName string - MorePercentiles bool - InstantReport bool - HaltOnVegetaError bool - 
DisableHttpCompression bool -} - -// NewConfig creates a new Config with default values -func NewConfig() *Config { - return &Config{ - VegetaPatternTarFile: DefaultVegetaPatternTarFile, - ClientVegetaOnCore: DefaultClientVegetaOnCore, - ClientBuildDir: DefaultClientBuildDir, - Repetitions: DefaultRepetitions, - TestSequence: DefaultTestSequence, - ClientAddress: DefaultServerAddress, - TestType: DefaultTestType, - TestingClient: DefaultClientName, - WaitingTime: DefaultWaitingTime, - VersionedTestReport: false, - Verbose: false, - MacConnection: false, - CheckServerAlive: true, - Tracing: false, - EmptyCache: false, - CreateTestReport: false, - MaxConnection: DefaultMaxConn, - VegetaResponseTimeout: DefaultVegetaResponseTimeout, - MaxBodyRsp: DefaultMaxBodyRsp, - JSONReportFile: "", - BinaryFileFullPathname: "", - BinaryFile: "", - ChainName: "mainnet", - MorePercentiles: false, - InstantReport: false, - HaltOnVegetaError: false, - DisableHttpCompression: false, - } -} - -// Validate checks the configuration for conflicts and invalid values -func (c *Config) Validate() error { - if c.JSONReportFile != "" && c.TestingClient == "" { - return fmt.Errorf("with json-report must also set testing-client") - } - - if c.ClientBuildDir != "" { - if _, err := os.Stat(c.ClientBuildDir); c.ClientBuildDir != "" && os.IsNotExist(err) { - return fmt.Errorf("client build dir not specified correctly: %s", c.ClientBuildDir) - } - } - - if c.EmptyCache { - currentUser, err := user.Current() - if err != nil { - return fmt.Errorf("failed to get current user: %w", err) - } - if currentUser.Username != "root" { - return fmt.Errorf("empty-cache option can only be used by root") - } - } - - return nil -} - -// TestSequenceItem represents a single test in the sequence -type TestSequenceItem struct { - QPS int - Duration int -} - -type TestSequence []TestSequenceItem - -// ParseTestSequence parses the test sequence string into structured items -func ParseTestSequence(sequence string) 
([]TestSequenceItem, error) { - var items []TestSequenceItem - - parts := strings.Split(sequence, ",") - for _, part := range parts { - qpsDur := strings.Split(part, ":") - if len(qpsDur) != 2 { - return nil, fmt.Errorf("invalid test sequence format: %s", part) - } - - qps, err := strconv.Atoi(qpsDur[0]) - if err != nil { - return nil, fmt.Errorf("invalid QPS value: %s", qpsDur[0]) - } - - duration, err := strconv.Atoi(qpsDur[1]) - if err != nil { - return nil, fmt.Errorf("invalid duration value: %s", qpsDur[1]) - } - - items = append(items, TestSequenceItem{ - QPS: qps, - Duration: duration, - }) - } - - return items, nil -} - -// VegetaTarget represents a single HTTP request target for Vegeta -type VegetaTarget struct { - Method string `json:"method"` - URL string `json:"url"` - Body []byte `json:"body,omitempty"` - Header map[string][]string `json:"header,omitempty"` -} - -// TestMetrics holds the results of a performance test -type TestMetrics struct { - ClientName string - TestNumber int - Repetition int - QPS int - Duration int - MinLatency string - Mean string - P50 string - P90 string - P95 string - P99 string - MaxLatency string - SuccessRatio string - Error string - VegetaMetrics *vegeta.Metrics -} - -// JSONReport represents the structure of the JSON performance report -type JSONReport struct { - Platform PlatformInfo `json:"platform"` - Configuration ConfigurationInfo `json:"configuration"` - Results []TestResult `json:"results"` -} - -// PlatformInfo holds platform hardware and software information -type PlatformInfo struct { - Vendor string `json:"vendor"` - Product string `json:"product"` - Board string `json:"board"` - CPU string `json:"cpu"` - Bogomips string `json:"bogomips"` - Kernel string `json:"kernel"` - GCCVersion string `json:"gccVersion"` - GoVersion string `json:"goVersion"` - ClientCommit string `json:"clientCommit"` -} - -// ConfigurationInfo holds test configuration information -type ConfigurationInfo struct { - TestingClient string 
`json:"testingClient"` - TestingAPI string `json:"testingApi"` - TestSequence string `json:"testSequence"` - TestRepetitions int `json:"testRepetitions"` - VegetaFile string `json:"vegetaFile"` - VegetaChecksum string `json:"vegetaChecksum"` - Taskset string `json:"taskset"` -} - -// TestResult holds results for a single QPS/duration test -type TestResult struct { - QPS int `json:"qps"` - Duration int `json:"duration"` - TestRepetitions []RepetitionInfo `json:"testRepetitions"` -} - -// RepetitionInfo holds information for a single test repetition -type RepetitionInfo struct { - VegetaBinary string `json:"vegetaBinary"` - VegetaReport map[string]interface{} `json:"vegetaReport"` - VegetaReportHdrPlot string `json:"vegetaReportHdrPlot"` -} - -// Hardware provides methods to extract hardware information -type Hardware struct{} - -// Vendor returns the system vendor -func (h *Hardware) Vendor() string { - if runtime.GOOS != "linux" { - return "unknown" - } - data, err := os.ReadFile("/sys/devices/virtual/dmi/id/sys_vendor") - if err != nil { - return "unknown" - } - return strings.TrimSpace(string(data)) -} - -// NormalizedVendor returns the system vendor as a lowercase first token -func (h *Hardware) NormalizedVendor() string { - vendor := h.Vendor() - parts := strings.Split(vendor, " ") - if len(parts) > 0 { - return strings.ToLower(parts[0]) - } - return "unknown" -} - -// Product returns the system product name -func (h *Hardware) Product() string { - if runtime.GOOS != "linux" { - return "unknown" - } - data, err := os.ReadFile("/sys/devices/virtual/dmi/id/product_name") - if err != nil { - return "unknown" - } - return strings.TrimSpace(string(data)) -} - -// Board returns the system board name -func (h *Hardware) Board() string { - if runtime.GOOS != "linux" { - return "unknown" - } - data, err := os.ReadFile("/sys/devices/virtual/dmi/id/board_name") - if err != nil { - return "unknown" - } - return strings.TrimSpace(string(data)) -} - -// NormalizedProduct 
returns the system product name as lowercase without whitespaces -func (h *Hardware) NormalizedProduct() string { - product := h.Product() - return strings.ToLower(strings.ReplaceAll(product, " ", "")) -} - -// NormalizedBoard returns the board name as a lowercase name without whitespaces -func (h *Hardware) NormalizedBoard() string { - board := h.Board() - parts := strings.Split(board, "/") - if len(parts) > 0 { - return strings.ToLower(strings.ReplaceAll(parts[0], " ", "")) - } - return "unknown" -} - -// GetCPUModel returns the CPU model information -func (h *Hardware) GetCPUModel() string { - if runtime.GOOS != "linux" { - return "unknown" - } - - cmd := exec.Command("sh", "-c", "cat /proc/cpuinfo | grep 'model name' | uniq") - output, err := cmd.Output() - if err != nil { - return "unknown" - } - - parts := strings.Split(string(output), ":") - if len(parts) > 1 { - return strings.TrimSpace(parts[1]) - } - return "unknown" -} - -// GetBogomips returns the bogomips value -func (h *Hardware) GetBogomips() string { - if runtime.GOOS != "linux" { - return "unknown" - } - - cmd := exec.Command("sh", "-c", "cat /proc/cpuinfo | grep 'bogomips' | uniq") - output, err := cmd.Output() - if err != nil { - return "unknown" - } - - parts := strings.Split(string(output), ":") - if len(parts) > 1 { - return strings.TrimSpace(parts[1]) - } - return "unknown" -} - -// GetKernelVersion returns the kernel version -func GetKernelVersion() string { - cmd := exec.Command("uname", "-r") - output, err := cmd.Output() - if err != nil { - return "unknown" - } - return strings.TrimSpace(string(output)) -} - -// GetGCCVersion returns the GCC version -func GetGCCVersion() string { - cmd := exec.Command("gcc", "--version") - output, err := cmd.Output() - if err != nil { - return "unknown" - } - lines := strings.Split(string(output), "\n") - if len(lines) > 0 { - return strings.TrimSpace(lines[0]) - } - return "unknown" -} - -// GetGoVersion returns the Go version -func GetGoVersion() string 
{ - cmd := exec.Command("go", "version") - output, err := cmd.Output() - if err != nil { - return "unknown" - } - return strings.TrimSpace(string(output)) -} - -// GetGitCommit returns the git commit hash for a directory -func GetGitCommit(dir string) string { - if dir == "" { - return "" - } - - cmd := exec.Command("git", "rev-parse", "HEAD") - cmd.Dir = dir - output, err := cmd.Output() - if err != nil { - return "" - } - return strings.TrimSpace(string(output)) -} - -// GetFileChecksum returns the checksum of a file -func GetFileChecksum(filepath string) string { - cmd := exec.Command("sum", filepath) - output, err := cmd.Output() - if err != nil { - return "" - } - parts := strings.Split(string(output), " ") - if len(parts) > 0 { - return parts[0] - } - return "" -} - -// IsProcessRunning checks if a process with the given name is running -func IsProcessRunning(processName string) bool { - cmd := exec.Command("pgrep", "-x", processName) - out, err := cmd.Output() - - return err == nil && len(out) > 0 -} - -// EmptyCache drops OS caches -func EmptyCache() error { - var cmd *exec.Cmd - - switch runtime.GOOS { - case "linux": - // Sync and drop caches - if err := exec.Command("sync").Run(); err != nil { - return fmt.Errorf("sync failed: %w", err) - } - cmd = exec.Command("sh", "-c", "echo 3 > /proc/sys/vm/drop_caches") - case "darwin": - // macOS purge - if err := exec.Command("sync").Run(); err != nil { - return fmt.Errorf("sync failed: %w", err) - } - cmd = exec.Command("purge") - default: - return fmt.Errorf("unsupported OS: %s", runtime.GOOS) - } - - if err := cmd.Run(); err != nil { - return fmt.Errorf("cache purge failed: %w", err) - } - - return nil -} - -// FormatDuration formats a duration string with units -func FormatDuration(d time.Duration) string { - if d < time.Millisecond { - return fmt.Sprintf("%.0fµs", float64(d.Microseconds())) - } - if d < time.Second { - return fmt.Sprintf("%.2fms", float64(d.Microseconds())/1000.0) - } - return 
fmt.Sprintf("%.2fs", d.Seconds()) -} - -// ParseLatency parses a latency string and returns it in a consistent format -func ParseLatency(latency string) string { - // Replace microsecond symbol and normalise - latency = strings.ReplaceAll(latency, "µs", "us") - return strings.TrimSpace(latency) -} - -// PerfTest manages performance test execution -type PerfTest struct { - config *Config - testReport *TestReport -} - -// Supported compression types -const ( - GzipCompression = ".gz" - Bzip2Compression = ".bz2" - NoCompression = "" -) - -// getCompressionType determines the compression from the filename extension. -func getCompressionType(filename string) string { - if strings.HasSuffix(filename, ".tar.gz") || strings.HasSuffix(filename, ".tgz") { - return GzipCompression - } - if strings.HasSuffix(filename, ".tar.bz2") || strings.HasSuffix(filename, ".tbz") { - return Bzip2Compression - } - return NoCompression -} - -func autodetectCompression(inFile *os.File) (string, error) { - // Assume we have no compression and try to detect it if the tar header is invalid - compressionType := NoCompression - tarReader := tar.NewReader(inFile) - _, err := tarReader.Next() - if err != nil && !errors.Is(err, io.EOF) { - // Reset the file position for read and check if it's gzip encoded - _, err = inFile.Seek(0, io.SeekStart) - if err != nil { - return compressionType, err - } - _, err = gzip.NewReader(inFile) - if err == nil { - compressionType = GzipCompression - } else { - // Reset the file position for read and check if it's gzip encoded - _, err = inFile.Seek(0, io.SeekStart) - if err != nil { - return compressionType, err - } - _, err = tar.NewReader(bzip2.NewReader(inFile)).Next() - if err == nil { - compressionType = Bzip2Compression - } - } - } - return compressionType, nil -} - -// NewPerfTest creates a new performance test instance -func NewPerfTest(config *Config, testReport *TestReport) (*PerfTest, error) { - pt := &PerfTest{ - config: config, - testReport: 
testReport, - } - - // Initial cleanup - if err := pt.Cleanup(true); err != nil { - return nil, fmt.Errorf("initial cleanup failed: %w", err) - } - - // Copy and extract the pattern file - if err := pt.CopyAndExtractPatternFile(); err != nil { - return nil, fmt.Errorf("failed to setup pattern file: %w", err) - } - - return pt, nil -} - -// Cleanup removes temporary files -func (pt *PerfTest) Cleanup(initial bool) error { - filesToRemove := []string{ - VegetaTarFileName, - "perf.data.old", - "perf.data", - } - - for _, fileName := range filesToRemove { - _, err := os.Stat(fileName) - if errors.Is(err, os.ErrNotExist) { - continue - } - err = os.Remove(fileName) - if err != nil { - return err - } - } - - // Remove the pattern directory - err := os.RemoveAll(VegetaPatternDirname) - if err != nil { - return err - } - - // Remove the run test directory - if initial { - err := os.RemoveAll(RunTestDirname) - if err != nil { - return err - } - } else { - // Try to remove, ignore if not empty - _ = os.Remove(RunTestDirname) - } - - return nil -} - -// CopyAndExtractPatternFile copies and extracts the vegeta pattern tar file -func (pt *PerfTest) CopyAndExtractPatternFile() error { - // Check if the pattern file exists - if _, err := os.Stat(pt.config.VegetaPatternTarFile); os.IsNotExist(err) { - return fmt.Errorf("invalid pattern file: %s", pt.config.VegetaPatternTarFile) - } - - // Create the run test directory - if err := os.MkdirAll(RunTestDirname, 0755); err != nil { - return fmt.Errorf("failed to create temp directory: %w", err) - } - - // Copy tar file - if err := pt.copyFile(pt.config.VegetaPatternTarFile, VegetaTarFileName); err != nil { - return fmt.Errorf("failed to copy pattern file: %w", err) - } - - if pt.config.Tracing { - fmt.Printf("Copy Vegeta pattern: %s -> %s\n", pt.config.VegetaPatternTarFile, VegetaTarFileName) - } - - // Extract tar file - if err := pt.extractTarGz(VegetaTarFileName, RunTestDirname); err != nil { - return fmt.Errorf("failed to extract 
pattern file: %w", err) - } - - if pt.config.Tracing { - fmt.Printf("Extracting Vegeta pattern to: %s\n", RunTestDirname) - } - - // Substitute address if not localhost - if pt.config.ClientAddress != "localhost" { - patternDir := VegetaPatternBase + pt.config.TestType + ".txt" - - if err := pt.replaceInFile(patternDir, "localhost", pt.config.ClientAddress); err != nil { - log.Printf("Warning: failed to replace address in pattern: %v", err) - } - } - - return nil -} - -// copyFile copies a file from src to dst -func (pt *PerfTest) copyFile(src, dst string) error { - sourceFile, err := os.Open(src) - if err != nil { - return err - } - defer func(sourceFile *os.File) { - err := sourceFile.Close() - if err != nil { - log.Printf("Warning: failed to close source file: %v", err) - } - }(sourceFile) - - destFile, err := os.Create(dst) - if err != nil { - return err - } - defer func(destFile *os.File) { - err := destFile.Close() - if err != nil { - log.Printf("Warning: failed to close destination file: %v", err) - } - }(destFile) - - _, err = io.Copy(destFile, sourceFile) - return err -} - -// extractTarGz extracts a tar.gz file to a destination directory -func (pt *PerfTest) extractTarGz(tarFile, destDir string) error { - // Open the archive file - file, err := os.Open(tarFile) - if err != nil { - return fmt.Errorf("failed to open archive: %w", err) - } - defer func(inFile *os.File) { - _ = inFile.Close() - }(file) - - // Wrap the input file with the correct compression reader - compressionType := getCompressionType(tarFile) - if compressionType == NoCompression { - // Possibly handle the corner case where the file is compressed but has tar extension - compressionType, err = autodetectCompression(file) - if err != nil { - return fmt.Errorf("failed to autodetect compression for archive: %w", err) - } - file, err = os.Open(tarFile) - if err != nil { - return err - } - } - - var reader io.Reader - switch compressionType { - case GzipCompression: - if reader, err = 
gzip.NewReader(file); err != nil { - return fmt.Errorf("failed to create gzip reader: %w", err) - } - case Bzip2Compression: - reader = bzip2.NewReader(file) - case NoCompression: - reader = file - } - - tr := tar.NewReader(reader) - - for { - header, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - target := filepath.Join(destDir, header.Name) - - switch header.Typeflag { - case tar.TypeDir: - if err := os.MkdirAll(target, 0755); err != nil { - return err - } - case tar.TypeReg: - outFile, err := os.Create(target) - if err != nil { - return err - } - if _, err := io.Copy(outFile, tr); err != nil { - err := outFile.Close() - if err != nil { - return err - } - return err - } - err = outFile.Close() - if err != nil { - return err - } - } - } - - return nil -} - -// replaceInFile replaces old string with new string in a file -func (pt *PerfTest) replaceInFile(filepath, old, new string) error { - input, err := os.ReadFile(filepath) - if err != nil { - return err - } - - output := strings.ReplaceAll(string(input), old, new) - - return os.WriteFile(filepath, []byte(output), 0644) -} - -// Execute runs a single performance test -func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, name string, qps, duration int, format ResultFormat) error { - // Empty cache if configured - if pt.config.EmptyCache { - if err := EmptyCache(); err != nil { - log.Printf("Warning: failed to empty cache: %v", err) - } - } - - // Determine pattern file - var pattern string - pattern = VegetaPatternBase + pt.config.TestType + ".txt" - - // Create the binary file name - timestamp := time.Now().Format("20060102150405") - pt.config.BinaryFile = fmt.Sprintf("%s_%s_%s_%s_%d_%d_%d.bin", - timestamp, - pt.config.ChainName, - pt.config.TestingClient, - pt.config.TestType, - qps, - duration, - repetition+1) - - // Create the binary directory - var dirname string - if pt.config.VersionedTestReport { - dirname = "./perf/reports/" + BinaryDir + 
"/" - } else { - dirname = RunTestDirname + "/" + BinaryDir + "/" - } - - if err := os.MkdirAll(dirname, 0755); err != nil { - return fmt.Errorf("failed to create binary directory: %w", err) - } - - pt.config.BinaryFileFullPathname = dirname + pt.config.BinaryFile - - // Print test result information - maxRepetitionDigits := strconv.Itoa(format.maxRepetitionDigits) - maxQpsDigits := strconv.Itoa(format.maxQpsDigits) - maxDurationDigits := strconv.Itoa(format.maxDurationDigits) - fmt.Printf("[%d.%"+maxRepetitionDigits+"d] %s: executes test qps: %"+maxQpsDigits+"d time: %"+maxDurationDigits+"d -> ", - testNumber, repetition+1, pt.config.TestingClient, qps, duration) - - // Load targets from pattern file - targets, err := pt.loadTargets(pattern) - if err != nil { - return fmt.Errorf("failed to load targets: %w", err) - } - - // Run vegeta attack - metrics, err := pt.runVegetaAttack(ctx, targets, qps, time.Duration(duration)*time.Second, pt.config.BinaryFileFullPathname) - if err != nil { - return fmt.Errorf("vegeta attack failed: %w", err) - } - - // Check if the server is still alive during the test - if pt.config.CheckServerAlive { - if !IsProcessRunning(pt.config.TestingClient) { - fmt.Println("test failed: server is Dead") - return fmt.Errorf("server died during test") - } - } - - // Process results - return pt.processResults(testNumber, repetition, name, qps, duration, metrics) -} - -// loadTargets loads Vegeta targets from a pattern file -func (pt *PerfTest) loadTargets(filepath string) ([]vegeta.Target, error) { - file, err := os.Open(filepath) - if err != nil { - return nil, err - } - defer func(file *os.File) { - err := file.Close() - if err != nil { - log.Printf("Warning: failed to close pattern file: %v", err) - } - }(file) - - const maxCapacity = 1024 * 1024 // 1MB - var targets []vegeta.Target - scanner := bufio.NewScanner(file) - buffer := make([]byte, 0, maxCapacity) - scanner.Buffer(buffer, maxCapacity) - - for scanner.Scan() { - line := scanner.Text() 
- if line == "" { - continue - } - - var vt VegetaTarget - if err := json.Unmarshal([]byte(line), &vt); err != nil { - return nil, fmt.Errorf("failed to parse target: %w", err) - } - - target := vegeta.Target{ - Method: vt.Method, - URL: vt.URL, - Body: vt.Body, - Header: make(http.Header), - } - - for k, v := range vt.Header { - for _, vv := range v { - target.Header.Set(k, vv) - } - } - - targets = append(targets, target) - } - - if err := scanner.Err(); err != nil { - return nil, err - } - - if len(targets) == 0 { - return nil, fmt.Errorf("no targets found in pattern file") - } - - // Print test port information - /*if pt.config.Verbose { - fmt.Printf("Test on port: %s\n", targets[0].URL) - }*/ - - return targets, nil -} - -// runVegetaAttack executes a Vegeta attack using the library -func (pt *PerfTest) runVegetaAttack(ctx context.Context, targets []vegeta.Target, qps int, duration time.Duration, outputFile string) (*vegeta.Metrics, error) { - // Create rate - rate := vegeta.Rate{Freq: qps, Per: time.Second} - - // Create targeter - targeter := vegeta.NewStaticTargeter(targets...) 
- - // Create attacker - timeout, _ := time.ParseDuration(pt.config.VegetaResponseTimeout) - maxConnInt, _ := strconv.Atoi(pt.config.MaxConnection) - maxBodyInt, _ := strconv.Atoi(pt.config.MaxBodyRsp) - - tr := &http.Transport{ - DisableCompression: pt.config.DisableHttpCompression, - Proxy: http.ProxyFromEnvironment, - } - - customClient := &http.Client{ - Transport: tr, - } - - attacker := vegeta.NewAttacker( - vegeta.Client(customClient), - vegeta.Timeout(timeout), - vegeta.Workers(uint64(maxConnInt)), - vegeta.MaxBody(int64(maxBodyInt)), - vegeta.KeepAlive(true), - ) - - // Create the output file for results - out, err := os.Create(outputFile) - if err != nil { - return nil, fmt.Errorf("failed to create output file: %w", err) - } - defer func(out *os.File) { - err := out.Close() - if err != nil { - log.Printf("Warning: failed to close output file: %v", err) - } - }(out) - - encoder := vegeta.NewEncoder(out) - - // Execute the attack i.e. the test workload - var metrics vegeta.Metrics - resultCh := attacker.Attack(targeter, rate, duration, "vegeta-attack") - for { - select { - case result := <-resultCh: - if result == nil { - metrics.Close() - return &metrics, nil - } - metrics.Add(result) - if err := encoder.Encode(result); err != nil { - log.Printf("Warning: failed to encode result: %v", err) - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } -} - -// ExecuteSequence executes a sequence of performance tests -func (pt *PerfTest) ExecuteSequence(ctx context.Context, sequence []TestSequenceItem, tag string) error { - testNumber := 1 - - // Get pattern to extract port information - var pattern string - pattern = VegetaPatternBase + pt.config.TestType + ".txt" - - // Print port information - if file, err := os.Open(pattern); err == nil { - scanner := bufio.NewScanner(file) - if scanner.Scan() { - var vt VegetaTarget - if json.Unmarshal([]byte(scanner.Text()), &vt) == nil { - fmt.Printf("Test on port: %s\n", vt.URL) - } - } - err = file.Close() - if err != 
nil { - return err - } - } - - maxQpsDigits, maxDurationDigits := maxQpsAndDurationDigits(sequence) - resultFormat := ResultFormat{ - maxRepetitionDigits: countDigits(pt.config.Repetitions), - maxQpsDigits: maxQpsDigits, - maxDurationDigits: maxDurationDigits, - } - - // Execute each test in sequence - for _, test := range sequence { - for rep := 0; rep < pt.config.Repetitions; rep++ { - if test.QPS > 0 { - err := pt.Execute(ctx, testNumber, rep, tag, test.QPS, test.Duration, resultFormat) - if err != nil { - return err - } - } else { - // qps = 0 means we've been asked for a silence period - time.Sleep(time.Duration(test.Duration) * time.Second) - } - - time.Sleep(time.Duration(pt.config.WaitingTime) * time.Second) - } - testNumber++ - fmt.Println() - } - - return nil -} - -func countDigits(n int) int { - if n == 0 { - return 1 - } - digits := 0 - for n != 0 { - n /= 10 - digits++ - } - return digits -} - -func maxQpsAndDurationDigits(sequence TestSequence) (maxQpsDigits, maxDurationDigits int) { - for _, item := range sequence { - qpsDigits := countDigits(item.QPS) - if qpsDigits > maxQpsDigits { - maxQpsDigits = qpsDigits - } - durationDigits := countDigits(item.Duration) - if durationDigits > maxDurationDigits { - maxDurationDigits = durationDigits - } - } - return -} - -type ResultFormat struct { - maxRepetitionDigits, maxQpsDigits, maxDurationDigits int -} - -// processResults processes the vegeta metrics and generates reports -func (pt *PerfTest) processResults(testNumber, repetition int, name string, qps, duration int, metrics *vegeta.Metrics) error { - // Extract latency values - minLatency := FormatDuration(metrics.Latencies.Min) - mean := FormatDuration(metrics.Latencies.Mean) - p50 := FormatDuration(metrics.Latencies.P50) - p90 := FormatDuration(metrics.Latencies.P90) - p95 := FormatDuration(metrics.Latencies.P95) - p99 := FormatDuration(metrics.Latencies.P99) - maxLatency := FormatDuration(metrics.Latencies.Max) - - // Calculate success ratio - 
successRatio := fmt.Sprintf("%.2f%%", metrics.Success*100) - - // Check for errors - errorMsg := "" - if len(metrics.Errors) > 0 { - // Collect unique error messages - errorMap := make(map[string]int) - for _, err := range metrics.Errors { - errorMap[err]++ - } - - const MaxErrorsToDisplay = 1 - errorsToDisplay := 0 - for errStr, count := range errorMap { - if errorsToDisplay >= MaxErrorsToDisplay { - break - } - if errorMsg != "" { - errorMsg += "; " - } - errorMsg += fmt.Sprintf("%s (x%d)", errStr, count) - errorsToDisplay++ - } - if errorsToDisplay < len(errorMap) { - errorMsg += fmt.Sprintf(" (+%d more)", len(errorMap)-errorsToDisplay) - } - } - - // Print results - var resultRecord string - if pt.config.MorePercentiles { - resultRecord = fmt.Sprintf("success=%7s lat=[p50=%8s p90=%8s p95=%8s p99=%8s max=%8s]", - successRatio, p50, p90, p95, p99, maxLatency) - } else { - resultRecord = fmt.Sprintf("success=%7s lat=[max=%8s]", successRatio, maxLatency) - } - if errorMsg != "" { - resultRecord += fmt.Sprintf(" error=%s", errorMsg) - } - fmt.Println(resultRecord) - - // Check for failures - if errorMsg != "" && pt.config.HaltOnVegetaError { - return fmt.Errorf("test failed: %s", errorMsg) - } - - if successRatio != "100.00%" { - return fmt.Errorf("test failed: ratio is not 100.00%%") - } - - // Write to the test report if enabled - if pt.config.CreateTestReport { - testMetrics := &TestMetrics{ - ClientName: name, - TestNumber: testNumber, - Repetition: repetition, - QPS: qps, - Duration: duration, - MinLatency: minLatency, - Mean: mean, - P50: p50, - P90: p90, - P95: p95, - P99: p99, - MaxLatency: maxLatency, - SuccessRatio: successRatio, - Error: errorMsg, - VegetaMetrics: metrics, - } - - if err := pt.testReport.WriteTestReport(testMetrics); err != nil { - return fmt.Errorf("failed to write test report: %w", err) - } - } - - // Print instant report if enabled - if pt.config.InstantReport { - pt.printInstantReport(metrics) - } - - return nil -} - -// 
printInstantReport prints detailed metrics to the console -func (pt *PerfTest) printInstantReport(metrics *vegeta.Metrics) { - fmt.Println("\n=== Detailed Metrics ===") - fmt.Printf("Requests: %d\n", metrics.Requests) - fmt.Printf("Duration: %v\n", metrics.Duration) - fmt.Printf("Rate: %.2f req/s\n", metrics.Rate) - fmt.Printf("Throughput: %.2f req/s\n", metrics.Throughput) - fmt.Printf("Success: %.2f%%\n", metrics.Success*100) - - fmt.Println("\nLatencies:") - fmt.Printf(" Min: %v\n", metrics.Latencies.Min) - fmt.Printf(" Mean: %v\n", metrics.Latencies.Mean) - fmt.Printf(" P50: %v\n", metrics.Latencies.P50) - fmt.Printf(" P90: %v\n", metrics.Latencies.P90) - fmt.Printf(" P95: %v\n", metrics.Latencies.P95) - fmt.Printf(" P99: %v\n", metrics.Latencies.P99) - fmt.Printf(" Max: %v\n", metrics.Latencies.Max) - - fmt.Println("\nStatus Codes:") - for code, count := range metrics.StatusCodes { - fmt.Printf(" %s: %d\n", code, count) - } - - if len(metrics.Errors) > 0 { - fmt.Println("\nErrors:") - errorMap := make(map[string]int) - for _, err := range metrics.Errors { - errorMap[err]++ - } - for errStr, count := range errorMap { - fmt.Printf(" %s: %d\n", errStr, count) - } - } - - fmt.Print("========================\n\n") -} - -// TestReport manages CSV and JSON report generation -type TestReport struct { - config *Config - csvFile *os.File - csvWriter *csv.Writer - jsonReport *JSONReport - hardware *Hardware - currentTestIdx int -} - -// NewTestReport creates a new test report instance -func NewTestReport(config *Config) *TestReport { - return &TestReport{ - config: config, - hardware: &Hardware{}, - currentTestIdx: -1, - } -} - -// Open initialises the test report and writes headers -func (tr *TestReport) Open() error { - if err := tr.createCSVFile(); err != nil { - return fmt.Errorf("failed to create CSV file: %w", err) - } - - // Collect system information - checksum := GetFileChecksum(tr.config.VegetaPatternTarFile) - gccVersion := GetGCCVersion() - goVersion := 
GetGoVersion() - kernelVersion := GetKernelVersion() - cpuModel := tr.hardware.GetCPUModel() - bogomips := tr.hardware.GetBogomips() - - var clientCommit string - if tr.config.ClientBuildDir != "" { - clientCommit = GetGitCommit(tr.config.ClientBuildDir) - } else { - clientCommit = "none" - } - - // Write headers - if err := tr.writeTestHeader(cpuModel, bogomips, kernelVersion, checksum, - gccVersion, goVersion, clientCommit); err != nil { - return fmt.Errorf("failed to write test header: %w", err) - } - - // Initialise the JSON report if needed - if tr.config.JSONReportFile != "" { - tr.initializeJSONReport(cpuModel, bogomips, kernelVersion, checksum, - gccVersion, goVersion, clientCommit) - } - - return nil -} - -// createCSVFile creates the CSV report file with appropriate naming -func (tr *TestReport) createCSVFile() error { - // Determine folder extension - extension := tr.hardware.NormalizedProduct() - if extension == "systemproductname" { - extension = tr.hardware.NormalizedBoard() - } - - // Create the folder path - csvFolder := tr.hardware.NormalizedVendor() + "_" + extension - var csvFolderPath string - if tr.config.VersionedTestReport { - csvFolderPath = filepath.Join("./perf/reports", tr.config.ChainName, csvFolder) - } else { - csvFolderPath = filepath.Join(RunTestDirname, tr.config.ChainName, csvFolder) - } - - if err := os.MkdirAll(csvFolderPath, 0755); err != nil { - return fmt.Errorf("failed to create CSV folder: %w", err) - } - - // Generate CSV filename - timestamp := time.Now().Format("20060102150405") - var csvFilename string - if tr.config.TestingClient != "" { - csvFilename = fmt.Sprintf("%s_%s_%s_perf.csv", - tr.config.TestType, timestamp, tr.config.TestingClient) - } else { - csvFilename = fmt.Sprintf("%s_%s_perf.csv", - tr.config.TestType, timestamp) - } - - csvFilepath := filepath.Join(csvFolderPath, csvFilename) - - // Create and open the CSV report file - file, err := os.Create(csvFilepath) - if err != nil { - return fmt.Errorf("failed 
to create CSV file: %w", err) - } - - tr.csvFile = file - tr.csvWriter = csv.NewWriter(file) - - fmt.Printf("Perf report file: %s\n\n", csvFilepath) - - return nil -} - -// writeTestHeader writes the test configuration header to CSV -func (tr *TestReport) writeTestHeader(cpuModel, bogomips, kernelVersion, checksum, gccVersion, goVersion, clientCommit string) error { - - // Write platform information - emptyRow := make([]string, 14) - - err := tr.csvWriter.Write(append(emptyRow[:12], "vendor", tr.hardware.Vendor())) - if err != nil { - return err - } - - product := tr.hardware.Product() - if product != "System Product Name" { - err := tr.csvWriter.Write(append(emptyRow[:12], "product", product)) - if err != nil { - return err - } - } else { - err := tr.csvWriter.Write(append(emptyRow[:12], "board", tr.hardware.Board())) - if err != nil { - return err - } - } - - err = tr.csvWriter.Write(append(emptyRow[:12], "cpu", cpuModel)) - if err != nil { - return err - } - err = tr.csvWriter.Write(append(emptyRow[:12], "bogomips", bogomips)) - if err != nil { - return err - } - err = tr.csvWriter.Write(append(emptyRow[:12], "kernel", kernelVersion)) - if err != nil { - return err - } - err = tr.csvWriter.Write(append(emptyRow[:12], "taskset", tr.config.ClientVegetaOnCore)) - if err != nil { - return err - } - err = tr.csvWriter.Write(append(emptyRow[:12], "vegetaFile", tr.config.VegetaPatternTarFile)) - if err != nil { - return err - } - err = tr.csvWriter.Write(append(emptyRow[:12], "vegetaChecksum", checksum)) - if err != nil { - return err - } - err = tr.csvWriter.Write(append(emptyRow[:12], "gccVersion", gccVersion)) - if err != nil { - return err - } - err = tr.csvWriter.Write(append(emptyRow[:12], "goVersion", goVersion)) - if err != nil { - return err - } - err = tr.csvWriter.Write(append(emptyRow[:12], "clientVersion", clientCommit)) - if err != nil { - return err - } - - // Empty rows - for range 2 { - err := tr.csvWriter.Write([]string{}) - if err != nil { - return 
err - } - } - - // Write column headers - headers := []string{ - "ClientName", "TestNo", "Repetition", "Qps", "Time(secs)", - "Min", "Mean", "50", "90", "95", "99", "Max", "Ratio", "Error", - } - err = tr.csvWriter.Write(headers) - if err != nil { - return err - } - tr.csvWriter.Flush() - - return tr.csvWriter.Error() -} - -// initializeJSONReport initializes the JSON report structure -func (tr *TestReport) initializeJSONReport(cpuModel, bogomips, kernelVersion, checksum, - gccVersion, goVersion, clientCommit string) { - - tr.jsonReport = &JSONReport{ - Platform: PlatformInfo{ - Vendor: strings.TrimSpace(tr.hardware.Vendor()), - Product: strings.TrimSpace(tr.hardware.Product()), - Board: strings.TrimSpace(tr.hardware.Board()), - CPU: strings.TrimSpace(cpuModel), - Bogomips: strings.TrimSpace(bogomips), - Kernel: strings.TrimSpace(kernelVersion), - GCCVersion: strings.TrimSpace(gccVersion), - GoVersion: strings.TrimSpace(goVersion), - ClientCommit: strings.TrimSpace(clientCommit), - }, - Configuration: ConfigurationInfo{ - TestingClient: tr.config.TestingClient, - TestingAPI: tr.config.TestType, - TestSequence: tr.config.TestSequence, - TestRepetitions: tr.config.Repetitions, - VegetaFile: tr.config.VegetaPatternTarFile, - VegetaChecksum: checksum, - Taskset: tr.config.ClientVegetaOnCore, - }, - Results: []TestResult{}, - } -} - -// WriteTestReport writes a test result to the report -func (tr *TestReport) WriteTestReport(metrics *TestMetrics) error { - // Write to CSV - row := []string{ - metrics.ClientName, - strconv.Itoa(metrics.TestNumber), - strconv.Itoa(metrics.Repetition), - strconv.Itoa(metrics.QPS), - strconv.Itoa(metrics.Duration), - metrics.MinLatency, - metrics.Mean, - metrics.P50, - metrics.P90, - metrics.P95, - metrics.P99, - metrics.MaxLatency, - metrics.SuccessRatio, - metrics.Error, - } - - if err := tr.csvWriter.Write(row); err != nil { - return fmt.Errorf("failed to write CSV row: %w", err) - } - tr.csvWriter.Flush() - - // Write to JSON if enabled 
- if tr.config.JSONReportFile != "" { - if err := tr.writeTestReportToJSON(metrics); err != nil { - return fmt.Errorf("failed to write JSON report: %w", err) - } - } - - return nil -} - -// writeTestReportToJSON writes a test result to the JSON report -func (tr *TestReport) writeTestReportToJSON(metrics *TestMetrics) error { - // Check if we need to create a new test result entry - if metrics.Repetition == 0 { - tr.currentTestIdx++ - tr.jsonReport.Results = append(tr.jsonReport.Results, TestResult{ - QPS: metrics.QPS, - Duration: metrics.Duration, - TestRepetitions: []RepetitionInfo{}, - }) - } - - // Generate JSON report from the binary file - jsonReportData, err := tr.generateJSONReport(tr.config.BinaryFileFullPathname) - if err != nil { - return fmt.Errorf("failed to generate JSON report: %w", err) - } - - // Generate HDR plot - hdrPlot, err := tr.generateHdrPlot(tr.config.BinaryFileFullPathname) - if err != nil { - return fmt.Errorf("failed to generate HDR plot: %w", err) - } - - // Add repetition info - repetitionInfo := RepetitionInfo{ - VegetaBinary: tr.config.BinaryFile, - VegetaReport: jsonReportData, - VegetaReportHdrPlot: hdrPlot, - } - - if tr.currentTestIdx >= 0 && tr.currentTestIdx < len(tr.jsonReport.Results) { - tr.jsonReport.Results[tr.currentTestIdx].TestRepetitions = append( - tr.jsonReport.Results[tr.currentTestIdx].TestRepetitions, - repetitionInfo, - ) - } - - return nil -} - -// generateJSONReport generates a JSON report from the binary file -func (tr *TestReport) generateJSONReport(binaryFile string) (map[string]interface{}, error) { - // Read the binary file - file, err := os.Open(binaryFile) - if err != nil { - return nil, err - } - defer func(file *os.File) { - err := file.Close() - if err != nil { - log.Printf("Warning: failed to close file: %v", err) - } - }(file) - - // Decode results - dec := vegeta.NewDecoder(file) - - // Create metrics - var metrics vegeta.Metrics - for { - var result vegeta.Result - if err := dec.Decode(&result); 
err != nil { - if err == io.EOF { - break - } - return nil, err - } - metrics.Add(&result) - } - metrics.Close() - - // Convert metrics to map - report := map[string]interface{}{ - "requests": metrics.Requests, - "duration": metrics.Duration.Seconds(), - "rate": metrics.Rate, - "throughput": metrics.Throughput, - "success": metrics.Success, - "latencies": map[string]interface{}{ - "min": metrics.Latencies.Min.Seconds(), - "mean": metrics.Latencies.Mean.Seconds(), - "p50": metrics.Latencies.P50.Seconds(), - "p90": metrics.Latencies.P90.Seconds(), - "p95": metrics.Latencies.P95.Seconds(), - "p99": metrics.Latencies.P99.Seconds(), - "max": metrics.Latencies.Max.Seconds(), - }, - "status_codes": metrics.StatusCodes, - "errors": metrics.Errors, - } - - return report, nil -} - -// generateHdrPlot generates HDR histogram plot data -func (tr *TestReport) generateHdrPlot(binaryFile string) (string, error) { - // Read the binary file - file, err := os.Open(binaryFile) - if err != nil { - return "", err - } - defer func(file *os.File) { - err := file.Close() - if err != nil { - log.Printf("Warning: failed to close file: %v", err) - } - }(file) - - // Decode results - dec := vegeta.NewDecoder(file) - - // Create metrics - var metrics vegeta.Metrics - for { - var result vegeta.Result - if err := dec.Decode(&result); err != nil { - if err == io.EOF { - break - } - return "", err - } - metrics.Add(&result) - } - metrics.Close() - - // Generate HDR histogram - var buf bytes.Buffer - histogram := metrics.Histogram - if histogram != nil { - // Print histogram data - for i, bucket := range histogram.Buckets { - _, err := fmt.Fprintf(&buf, "%.6f %d\n", float64(bucket), histogram.Counts[i]) - if err != nil { - return "", err - } - } - } - - return buf.String(), nil -} - -// Close finalises and closes the test report -func (tr *TestReport) Close() error { - // Flush and close the CSV file - if tr.csvWriter != nil { - tr.csvWriter.Flush() - if err := tr.csvWriter.Error(); err != nil { - 
log.Printf("CSV writer error: %v", err) - } - } - - if tr.csvFile != nil { - if err := tr.csvFile.Close(); err != nil { - return fmt.Errorf("failed to close CSV file: %w", err) - } - } - - // Write the JSON report if enabled - if tr.config.JSONReportFile != "" && tr.jsonReport != nil { - fmt.Printf("Create json file: %s\n", tr.config.JSONReportFile) - - jsonData, err := json.MarshalIndent(tr.jsonReport, "", " ") - if err != nil { - return fmt.Errorf("failed to marshal JSON report: %w", err) - } - - if err := os.WriteFile(tr.config.JSONReportFile, jsonData, 0644); err != nil { - return fmt.Errorf("failed to write JSON report: %w", err) - } - } - - return nil -} - func main() { app := &cli.App{ Name: "rpc_perf", - Usage: "Launch an automated sequence of RPC performance tests on on target blockchain node(s)", + Usage: "Launch an automated sequence of RPC performance tests on target blockchain node(s)", Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "disable-http-compression", - Aliases: []string{"O"}, - Usage: "Disable Http compression", - }, - &cli.BoolFlag{ - Name: "not-verify-server-alive", - Aliases: []string{"Z"}, - Usage: "Don't verify server is still active", - }, - &cli.BoolFlag{ - Name: "tmp-test-report", - Aliases: []string{"R"}, - Usage: "Generate report in tmp directory", - }, - &cli.BoolFlag{ - Name: "test-report", - Aliases: []string{"u"}, - Usage: "Generate report in reports area ready for Git repo", - }, - &cli.BoolFlag{ - Name: "verbose", - Aliases: []string{"v"}, - Usage: "Enable verbose output", - }, - &cli.BoolFlag{ - Name: "tracing", - Aliases: []string{"x"}, - Usage: "Enable verbose and tracing output", - }, - &cli.BoolFlag{ - Name: "empty-cache", - Aliases: []string{"e"}, - Usage: "Empty OS cache before each test", - }, - &cli.StringFlag{ - Name: "max-connections", - Aliases: []string{"C"}, - Value: DefaultMaxConn, - Usage: "Maximum number of connections", - }, - &cli.StringFlag{ - Name: "testing-client", - Aliases: []string{"D"}, - Value: 
DefaultClientName, - Usage: "Name of testing client", - }, - &cli.StringFlag{ - Name: "blockchain", - Aliases: []string{"b"}, - Value: "mainnet", - Usage: "Blockchain network name", - }, - &cli.StringFlag{ - Name: "test-type", - Aliases: []string{"y"}, - Value: DefaultTestType, - Usage: "Test type (e.g., eth_call, eth_getLogs)", - }, - &cli.StringFlag{ - Name: "pattern-file", - Aliases: []string{"p"}, - Value: DefaultVegetaPatternTarFile, - Usage: "Path to the Vegeta attack pattern file", - }, - &cli.IntFlag{ - Name: "repetitions", - Aliases: []string{"r"}, - Value: DefaultRepetitions, - Usage: "Number of repetitions for each test in sequence", - }, - &cli.StringFlag{ - Name: "test-sequence", - Aliases: []string{"t"}, - Value: DefaultTestSequence, - Usage: "Test sequence as qps:duration,... (e.g., 200:30,400:10)", - }, - &cli.IntFlag{ - Name: "wait-after-test-sequence", - Aliases: []string{"w"}, - Value: DefaultWaitingTime, - Usage: "Wait time between test iterations in seconds", - }, - &cli.StringFlag{ - Name: "rpc-client-address", - Aliases: []string{"d"}, - Value: DefaultServerAddress, - Usage: "Client address (e.g., 192.2.3.1)", - }, - &cli.StringFlag{ - Name: "client-build-dir", - Aliases: []string{"g"}, - Value: DefaultClientBuildDir, - Usage: "Path to Client build folder", - }, - &cli.StringFlag{ - Name: "run-vegeta-on-core", - Aliases: []string{"c"}, - Value: DefaultClientVegetaOnCore, - Usage: "Taskset format for Vegeta (e.g., 0-1:2-3)", - }, - &cli.StringFlag{ - Name: "response-timeout", - Aliases: []string{"T"}, - Value: DefaultVegetaResponseTimeout, - Usage: "Vegeta response timeout", - }, - &cli.StringFlag{ - Name: "max-body-rsp", - Aliases: []string{"M"}, - Value: DefaultMaxBodyRsp, - Usage: "Max bytes to read from response bodies", - }, - &cli.StringFlag{ - Name: "json-report", - Aliases: []string{"j"}, - Usage: "Generate JSON report at specified path", - }, - &cli.BoolFlag{ - Name: "more-percentiles", - Aliases: []string{"P"}, - Usage: "Print more 
percentiles in console report", - }, - &cli.BoolFlag{ - Name: "halt-on-vegeta-error", - Aliases: []string{"H"}, - Usage: "Consider test failed if Vegeta reports any error", - }, - &cli.BoolFlag{ - Name: "instant-report", - Aliases: []string{"I"}, - Usage: "Print instant Vegeta report for each test", - }, + &cli.BoolFlag{Name: "disable-http-compression", Aliases: []string{"O"}, Usage: "Disable Http compression"}, + &cli.BoolFlag{Name: "not-verify-server-alive", Aliases: []string{"Z"}, Usage: "Don't verify server is still active"}, + &cli.BoolFlag{Name: "tmp-test-report", Aliases: []string{"R"}, Usage: "Generate report in tmp directory"}, + &cli.BoolFlag{Name: "test-report", Aliases: []string{"u"}, Usage: "Generate report in reports area ready for Git repo"}, + &cli.BoolFlag{Name: "verbose", Aliases: []string{"v"}, Usage: "Enable verbose output"}, + &cli.BoolFlag{Name: "tracing", Aliases: []string{"x"}, Usage: "Enable verbose and tracing output"}, + &cli.BoolFlag{Name: "empty-cache", Aliases: []string{"e"}, Usage: "Empty OS cache before each test"}, + &cli.StringFlag{Name: "max-connections", Aliases: []string{"C"}, Value: perf.DefaultMaxConn, Usage: "Maximum number of connections"}, + &cli.StringFlag{Name: "testing-client", Aliases: []string{"D"}, Value: perf.DefaultClientName, Usage: "Name of testing client"}, + &cli.StringFlag{Name: "blockchain", Aliases: []string{"b"}, Value: "mainnet", Usage: "Blockchain network name"}, + &cli.StringFlag{Name: "test-type", Aliases: []string{"y"}, Value: perf.DefaultTestType, Usage: "Test type (e.g., eth_call, eth_getLogs)"}, + &cli.StringFlag{Name: "pattern-file", Aliases: []string{"p"}, Value: perf.DefaultVegetaPatternTarFile, Usage: "Path to the Vegeta attack pattern file"}, + &cli.IntFlag{Name: "repetitions", Aliases: []string{"r"}, Value: perf.DefaultRepetitions, Usage: "Number of repetitions for each test in sequence"}, + &cli.StringFlag{Name: "test-sequence", Aliases: []string{"t"}, Value: perf.DefaultTestSequence, Usage: 
"Test sequence as qps:duration,..."}, + &cli.IntFlag{Name: "wait-after-test-sequence", Aliases: []string{"w"}, Value: perf.DefaultWaitingTime, Usage: "Wait time between test iterations in seconds"}, + &cli.StringFlag{Name: "rpc-client-address", Aliases: []string{"d"}, Value: perf.DefaultServerAddress, Usage: "Client address"}, + &cli.StringFlag{Name: "client-build-dir", Aliases: []string{"g"}, Value: perf.DefaultClientBuildDir, Usage: "Path to Client build folder"}, + &cli.StringFlag{Name: "run-vegeta-on-core", Aliases: []string{"c"}, Value: perf.DefaultClientVegetaOnCore, Usage: "Taskset format for Vegeta"}, + &cli.StringFlag{Name: "response-timeout", Aliases: []string{"T"}, Value: perf.DefaultVegetaResponseTimeout, Usage: "Vegeta response timeout"}, + &cli.StringFlag{Name: "max-body-rsp", Aliases: []string{"M"}, Value: perf.DefaultMaxBodyRsp, Usage: "Max bytes to read from response bodies"}, + &cli.StringFlag{Name: "json-report", Aliases: []string{"j"}, Usage: "Generate JSON report at specified path"}, + &cli.BoolFlag{Name: "more-percentiles", Aliases: []string{"P"}, Usage: "Print more percentiles in console report"}, + &cli.BoolFlag{Name: "halt-on-vegeta-error", Aliases: []string{"H"}, Usage: "Consider test failed if Vegeta reports any error"}, + &cli.BoolFlag{Name: "instant-report", Aliases: []string{"I"}, Usage: "Print instant Vegeta report for each test"}, }, Action: runPerfTests, } @@ -1736,82 +51,73 @@ func main() { func runPerfTests(c *cli.Context) error { fmt.Println("Performance Test started") - // Create configuration from CLI flags - config := NewConfig() - - config.DisableHttpCompression = c.Bool("disable-http-compression") - config.CheckServerAlive = !c.Bool("not-verify-server-alive") - config.CreateTestReport = c.Bool("tmp-test-report") || c.Bool("test-report") - config.VersionedTestReport = c.Bool("test-report") - config.Verbose = c.Bool("verbose") || c.Bool("tracing") - config.Tracing = c.Bool("tracing") - config.EmptyCache = c.Bool("empty-cache") 
- - config.MaxConnection = c.String("max-connections") - config.TestingClient = c.String("testing-client") - config.ChainName = c.String("blockchain") - config.TestType = c.String("test-type") - config.VegetaPatternTarFile = c.String("pattern-file") - config.Repetitions = c.Int("repetitions") - config.TestSequence = c.String("test-sequence") - config.WaitingTime = c.Int("wait-after-test-sequence") - config.ClientAddress = c.String("rpc-client-address") - config.ClientBuildDir = c.String("client-build-dir") - config.ClientVegetaOnCore = c.String("run-vegeta-on-core") - config.VegetaResponseTimeout = c.String("response-timeout") - config.MaxBodyRsp = c.String("max-body-rsp") - config.JSONReportFile = c.String("json-report") - config.MorePercentiles = c.Bool("more-percentiles") - config.HaltOnVegetaError = c.Bool("halt-on-vegeta-error") - config.InstantReport = c.Bool("instant-report") - - // Validate configuration - if err := config.Validate(); err != nil { + cfg := perf.NewConfig() + + cfg.DisableHttpCompression = c.Bool("disable-http-compression") + cfg.CheckServerAlive = !c.Bool("not-verify-server-alive") + cfg.CreateTestReport = c.Bool("tmp-test-report") || c.Bool("test-report") + cfg.VersionedTestReport = c.Bool("test-report") + cfg.Verbose = c.Bool("verbose") || c.Bool("tracing") + cfg.Tracing = c.Bool("tracing") + cfg.EmptyCache = c.Bool("empty-cache") + + cfg.MaxConnection = c.String("max-connections") + cfg.TestingClient = c.String("testing-client") + cfg.ChainName = c.String("blockchain") + cfg.TestType = c.String("test-type") + cfg.VegetaPatternTarFile = c.String("pattern-file") + cfg.Repetitions = c.Int("repetitions") + cfg.TestSequence = c.String("test-sequence") + cfg.WaitingTime = c.Int("wait-after-test-sequence") + cfg.ClientAddress = c.String("rpc-client-address") + cfg.ClientBuildDir = c.String("client-build-dir") + cfg.ClientVegetaOnCore = c.String("run-vegeta-on-core") + cfg.VegetaResponseTimeout = c.String("response-timeout") + cfg.MaxBodyRsp = 
c.String("max-body-rsp") + cfg.JSONReportFile = c.String("json-report") + cfg.MorePercentiles = c.Bool("more-percentiles") + cfg.HaltOnVegetaError = c.Bool("halt-on-vegeta-error") + cfg.InstantReport = c.Bool("instant-report") + + if err := cfg.Validate(); err != nil { return fmt.Errorf("configuration validation failed: %w", err) } - // Parse test sequence - sequence, err := ParseTestSequence(config.TestSequence) + sequence, err := perf.ParseTestSequence(cfg.TestSequence) if err != nil { return fmt.Errorf("failed to parse test sequence: %w", err) } - // Create the test report - testReport := NewTestReport(config) + dirs := perf.NewRunDirs() + testReport := perf.NewTestReport(cfg, dirs) - // Create the performance test - perfTest, err := NewPerfTest(config, testReport) + perfTest, err := perf.NewPerfTest(cfg, testReport, dirs) if err != nil { return fmt.Errorf("failed to initialize performance test: %w", err) } - defer func(perfTest *PerfTest, initial bool) { - err := perfTest.Cleanup(initial) - if err != nil { + defer func() { + if err := perfTest.Cleanup(false); err != nil { log.Printf("Failed to cleanup: %v", err) } - }(perfTest, false) + }() - // Print test configuration fmt.Printf("Test repetitions: %d on sequence: %s for pattern: %s\n", - config.Repetitions, config.TestSequence, config.VegetaPatternTarFile) + cfg.Repetitions, cfg.TestSequence, cfg.VegetaPatternTarFile) - // Open the test report if needed - if config.CreateTestReport { + if cfg.CreateTestReport { if err := testReport.Open(); err != nil { return fmt.Errorf("failed to open test report: %w", err) } - defer func(testReport *TestReport) { - err := testReport.Close() - if err != nil { + defer func() { + if err := testReport.Close(); err != nil { log.Printf("Failed to close test report: %v", err) } - }(testReport) + }() } - // Create context ctx := context.Background() - if err := perfTest.ExecuteSequence(ctx, sequence, config.TestingClient); err != nil { + if err := perfTest.ExecuteSequence(ctx, 
sequence, cfg.TestingClient); err != nil { fmt.Printf("Performance Test failed, error: %v\n", err) return err } diff --git a/go.mod b/go.mod index a6d76045..dadda692 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ require ( github.com/golang-jwt/jwt/v5 v5.3.0 github.com/gorilla/websocket v1.5.3 github.com/josephburnett/jd/v2 v2.3.0 + github.com/json-iterator/go v1.1.12 github.com/tsenart/vegeta/v12 v12.13.0 github.com/urfave/cli/v2 v2.27.7 ) @@ -18,7 +19,6 @@ require ( github.com/google/go-cmp v0.7.0 // indirect github.com/influxdata/tdigest v0.0.1 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/internal/compare/comparator.go b/internal/compare/comparator.go new file mode 100644 index 00000000..2dd95222 --- /dev/null +++ b/internal/compare/comparator.go @@ -0,0 +1,439 @@ +package compare + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + "sync" + "time" + + "github.com/josephburnett/jd/v2" + jsoniter "github.com/json-iterator/go" + + "github.com/erigontech/rpc-tests/cmd/integration/jsondiff" + "github.com/erigontech/rpc-tests/internal/config" + "github.com/erigontech/rpc-tests/internal/testdata" +) + +var json = jsoniter.ConfigCompatibleWithStandardLibrary + +var bufPool = sync.Pool{ + New: func() any { return new(bytes.Buffer) }, +} + +var ( + ErrDiffTimeout = errors.New("diff timeout") + ErrDiffMismatch = errors.New("diff mismatch") +) + +const ( + externalToolTimeout = 30 * time.Second +) + +// ProcessResponse compares actual response against expected, handling all "don't care" cases. +// This is the v2 equivalent of v1's processResponse method. 
+func ProcessResponse( + response, referenceResponse, responseInFile any, + cfg *config.Config, + cmd *testdata.JsonRpcCommand, + outputDir, daemonFile, expRspFile, diffFile string, + outcome *testdata.TestOutcome, +) { + var expectedResponse any + if referenceResponse != nil { + expectedResponse = referenceResponse + } else { + expectedResponse = responseInFile + } + + if cfg.WithoutCompareResults { + err := dumpJSONs(cfg.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) + if err != nil { + outcome.Error = err + return + } + outcome.Success = true + return + } + + // Fast path: structural equality check + if compareResponses(response, expectedResponse) { + outcome.Metrics.EqualCount++ + err := dumpJSONs(cfg.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) + if err != nil { + outcome.Error = err + return + } + outcome.Success = true + return + } + + // Check "don't care" conditions + responseMap, respIsMap := response.(map[string]interface{}) + expectedMap, expIsMap := expectedResponse.(map[string]interface{}) + if respIsMap && expIsMap { + _, responseHasResult := responseMap["result"] + expectedResult, expectedHasResult := expectedMap["result"] + _, responseHasError := responseMap["error"] + expectedError, expectedHasError := expectedMap["error"] + + // Null expected result with a non-nil reference -> accept + if responseHasResult && expectedHasResult && expectedResult == nil && referenceResponse == nil { + err := dumpJSONs(cfg.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) + if err != nil { + outcome.Error = err + return + } + outcome.Success = true + return + } + // Null expected error -> accept + if responseHasError && expectedHasError && expectedError == nil { + err := dumpJSONs(cfg.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) + if err != nil { + outcome.Error = err 
+ return + } + outcome.Success = true + return + } + // Empty expected (just "jsonrpc" + "id") -> accept + if !expectedHasResult && !expectedHasError && len(expectedMap) == 2 { + err := dumpJSONs(cfg.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) + if err != nil { + outcome.Error = err + return + } + outcome.Success = true + return + } + // Both have error and DoNotCompareError -> accept + if responseHasError && expectedHasError && cfg.DoNotCompareError { + err := dumpJSONs(cfg.ForceDumpJSONs, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) + if err != nil { + outcome.Error = err + return + } + outcome.Success = true + return + } + } + + // Detailed comparison: dump files and run diff + err := dumpJSONs(true, daemonFile, expRspFile, outputDir, response, expectedResponse, &outcome.Metrics) + if err != nil { + outcome.Error = err + return + } + + var same bool + if cfg.DiffKind == config.JsonDiffGo { + outcome.Metrics.ComparisonCount++ + opts := &jsondiff.Options{SortArrays: true} + if respIsMap && expIsMap { + diff := jsondiff.DiffJSON(expectedMap, responseMap, opts) + same = len(diff) == 0 + diffString := jsondiff.DiffString(expectedMap, responseMap, opts) + if writeErr := os.WriteFile(diffFile, []byte(diffString), 0644); writeErr != nil { + outcome.Error = writeErr + return + } + if !same { + outcome.Error = ErrDiffMismatch + if cfg.ReqTestNum != -1 { + outcome.ColoredDiff = jsondiff.ColoredString(expectedMap, responseMap, opts) + } + } + } else { + responseArray, respIsArray := response.([]any) + expectedArray, expIsArray := expectedResponse.([]any) + if !respIsArray || !expIsArray { + outcome.Error = errors.New("cannot compare JSON objects (neither maps nor arrays)") + return + } + diff := jsondiff.DiffJSON(expectedArray, responseArray, opts) + same = len(diff) == 0 + diffString := jsondiff.DiffString(expectedArray, responseArray, opts) + if writeErr := os.WriteFile(diffFile, 
[]byte(diffString), 0644); writeErr != nil { + outcome.Error = writeErr + return + } + if !same { + outcome.Error = ErrDiffMismatch + if cfg.ReqTestNum != -1 { + outcome.ColoredDiff = jsondiff.ColoredString(expectedArray, responseArray, opts) + } + } + } + } else { + same, err = compareJSON(cfg, cmd, daemonFile, expRspFile, diffFile, &outcome.Metrics) + if err != nil { + outcome.Error = err + return + } + } + + if same && !cfg.ForceDumpJSONs { + os.Remove(daemonFile) + os.Remove(expRspFile) + os.Remove(diffFile) + } + + outcome.Success = same +} + +// compareResponses does a fast structural equality check. +func compareResponses(lhs, rhs any) bool { + leftMap, leftIsMap := lhs.(map[string]interface{}) + rightMap, rightIsMap := rhs.(map[string]interface{}) + if leftIsMap && rightIsMap { + return mapsEqual(leftMap, rightMap) + } + leftArray, leftIsArray := lhs.([]map[string]interface{}) + rightArray, rightIsArray := rhs.([]map[string]interface{}) + if leftIsArray && rightIsArray { + return arrayEqual(leftArray, rightArray) + } + return jsonValuesEqual(lhs, rhs) +} + +// jsonValuesEqual compares two JSON-decoded values without reflection for common types. +// JSON only produces: string, float64, bool, nil, map[string]interface{}, []interface{}. 
+func jsonValuesEqual(lhs, rhs any) bool { + if lhs == nil && rhs == nil { + return true + } + if lhs == nil || rhs == nil { + return false + } + switch l := lhs.(type) { + case string: + r, ok := rhs.(string) + return ok && l == r + case float64: + r, ok := rhs.(float64) + return ok && l == r + case bool: + r, ok := rhs.(bool) + return ok && l == r + case map[string]interface{}: + r, ok := rhs.(map[string]interface{}) + return ok && mapsEqual(l, r) + case []interface{}: + r, ok := rhs.([]interface{}) + if !ok || len(l) != len(r) { + return false + } + for i := range l { + if !jsonValuesEqual(l[i], r[i]) { + return false + } + } + return true + default: + return reflect.DeepEqual(lhs, rhs) + } +} + +func mapsEqual(lhs, rhs map[string]interface{}) bool { + if len(lhs) != len(rhs) { + return false + } + for k, lv := range lhs { + rv, ok := rhs[k] + if !ok || !jsonValuesEqual(lv, rv) { + return false + } + } + return true +} + +func arrayEqual(lhs, rhs []map[string]interface{}) bool { + if len(lhs) != len(rhs) { + return false + } + for i := range lhs { + if !mapsEqual(lhs[i], rhs[i]) { + return false + } + } + return true +} + +// marshalToFile marshals a value to JSON and writes it to a file using a pooled buffer. +func marshalToFile(value any, filename string, metrics *testdata.TestMetrics) error { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + + start := time.Now() + enc := json.NewEncoder(buf) + enc.SetIndent("", " ") + if err := enc.Encode(value); err != nil { + return err + } + metrics.MarshallingTime += time.Since(start) + + if err := os.WriteFile(filename, buf.Bytes(), 0644); err != nil { + return fmt.Errorf("exception on file write: %v", err) + } + return nil +} + +// dumpJSONs writes actual/expected responses to files if needed. 
+func dumpJSONs(dump bool, daemonFile, expRspFile, outputDir string, response, expectedResponse any, metrics *testdata.TestMetrics) error { + if !dump { + return nil + } + + if err := os.MkdirAll(outputDir, 0755); err != nil { + return fmt.Errorf("exception on makedirs: %s %v", outputDir, err) + } + + if daemonFile != "" { + if err := marshalToFile(response, daemonFile, metrics); err != nil { + return err + } + } + + if expRspFile != "" { + if err := marshalToFile(expectedResponse, expRspFile, metrics); err != nil { + return err + } + } + return nil +} + +// compareJSON dispatches to the appropriate external diff tool. +func compareJSON(cfg *config.Config, cmd *testdata.JsonRpcCommand, daemonFile, expRspFile, diffFile string, metrics *testdata.TestMetrics) (bool, error) { + metrics.ComparisonCount++ + + switch cfg.DiffKind { + case config.JdLibrary: + return runCompareJD(cmd, expRspFile, daemonFile, diffFile) + case config.JsonDiffTool: + return runExternalCompare(true, "/dev/null", expRspFile, daemonFile, diffFile) + case config.DiffTool: + return runExternalCompare(false, "/dev/null", expRspFile, daemonFile, diffFile) + default: + return false, fmt.Errorf("unknown JSON diff kind: %d", cfg.DiffKind) + } +} + +// runCompareJD uses the JD library for comparison, with 30s timeout and pathOptions support. 
+func runCompareJD(cmd *testdata.JsonRpcCommand, file1, file2, diffFile string) (bool, error) { + node1, err := jd.ReadJsonFile(file1) + if err != nil { + return false, err + } + node2, err := jd.ReadJsonFile(file2) + if err != nil { + return false, err + } + + type result struct { + diff jd.Diff + err error + } + + resChan := make(chan result, 1) + ctx, cancel := context.WithTimeout(context.Background(), externalToolTimeout) + defer cancel() + + go func() { + var d jd.Diff + if cmd.TestInfo != nil && cmd.TestInfo.Metadata != nil && cmd.TestInfo.Metadata.Response != nil && cmd.TestInfo.Metadata.Response.PathOptions != nil { + options, err := jd.ReadOptionsString(string(cmd.TestInfo.Metadata.Response.PathOptions)) + if err != nil { + resChan <- result{err: err} + return + } + d = node1.Diff(node2, options...) + } else { + d = node1.Diff(node2) + } + resChan <- result{diff: d} + }() + + select { + case <-ctx.Done(): + return false, fmt.Errorf("JSON diff (JD) timeout for files %s and %s", file1, file2) + case res := <-resChan: + if res.err != nil { + return false, res.err + } + diffString := res.diff.Render() + if err := os.WriteFile(diffFile, []byte(diffString), 0644); err != nil { + return false, err + } + // Check if diff file is empty (no differences) + info, err := os.Stat(diffFile) + if err != nil { + return false, err + } + return info.Size() == 0, nil + } +} + +// runExternalCompare runs json-diff or diff as an external process with timeout. 
+func runExternalCompare(useJsonDiff bool, errorFile, file1, file2, diffFile string) (bool, error) { + var cmdStr string + if useJsonDiff { + if _, err := exec.LookPath("json-diff"); err != nil { + // Fall back to regular diff + useJsonDiff = false + } + } + + if useJsonDiff { + cmdStr = fmt.Sprintf("json-diff -s %s %s > %s 2> %s", file1, file2, diffFile, errorFile) + } else { + cmdStr = fmt.Sprintf("diff %s %s > %s 2> %s", file1, file2, diffFile, errorFile) + } + + ctx, cancel := context.WithTimeout(context.Background(), externalToolTimeout) + defer cancel() + + cmd := exec.CommandContext(ctx, "sh", "-c", cmdStr) + if err := cmd.Run(); err != nil { + // diff returns 1 when files differ, which is not an error for us + if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 1 && !useJsonDiff { + // diff found differences + } + } + + // Check error file + if errorFile != "/dev/null" { + fi, err := os.Stat(errorFile) + if err == nil && fi.Size() > 0 { + if !useJsonDiff { + return false, fmt.Errorf("diff command produced errors") + } + // Fall back to regular diff + return runExternalCompare(false, errorFile, file1, file2, diffFile) + } + } + + // Check diff file size + fi, err := os.Stat(diffFile) + if err != nil { + return false, err + } + return fi.Size() == 0, nil +} + +// OutputFilePaths returns the standard output file paths for a test. 
+func OutputFilePaths(outputDir, jsonFile string) (outputAPIFilename, outputDirName, diffFile, daemonFile, expRspFile string) { + outputAPIFilename = filepath.Join(outputDir, strings.TrimSuffix(jsonFile, filepath.Ext(jsonFile))) + outputDirName = filepath.Dir(outputAPIFilename) + diffFile = outputAPIFilename + "-diff.json" + daemonFile = outputAPIFilename + "-response.json" + expRspFile = outputAPIFilename + "-expResponse.json" + return +} diff --git a/internal/compare/comparator_bench_test.go b/internal/compare/comparator_bench_test.go new file mode 100644 index 00000000..13d57d89 --- /dev/null +++ b/internal/compare/comparator_bench_test.go @@ -0,0 +1,93 @@ +package compare + +import ( + "path/filepath" + "testing" + + "github.com/erigontech/rpc-tests/internal/config" + "github.com/erigontech/rpc-tests/internal/testdata" +) + +func BenchmarkCompareResponses_EqualMaps(b *testing.B) { + a := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + c := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + b.ResetTimer() + for i := 0; i < b.N; i++ { + compareResponses(a, c) + } +} + +func BenchmarkCompareResponses_DifferentMaps(b *testing.B) { + a := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + c := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} + b.ResetTimer() + for i := 0; i < b.N; i++ { + compareResponses(a, c) + } +} + +func BenchmarkCompareResponses_LargeMap(b *testing.B) { + makeMap := func(n int) map[string]interface{} { + m := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1)} + result := make(map[string]interface{}, n) + for j := 0; j < n; j++ { + result[string(rune('a'+j%26))+string(rune('0'+j/26))] = float64(j) + } + m["result"] = result + return m + } + a := makeMap(100) + c := makeMap(100) + b.ResetTimer() + for i := 0; i < b.N; i++ { + compareResponses(a, c) + } +} + +func BenchmarkProcessResponse_ExactMatch(b *testing.B) { + dir := 
b.TempDir() + cfg := config.NewConfig() + response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + outcome := &testdata.TestOutcome{} + cmd := &testdata.JsonRpcCommand{} + ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + } +} + +func BenchmarkProcessResponse_DiffMismatch_JsonDiffGo(b *testing.B) { + dir := b.TempDir() + cfg := config.NewConfig() + cfg.DiffKind = config.JsonDiffGo + + daemonFile := filepath.Join(dir, "response.json") + expRspFile := filepath.Join(dir, "expected.json") + diffFile := filepath.Join(dir, "diff.json") + + response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + outcome := &testdata.TestOutcome{} + cmd := &testdata.JsonRpcCommand{} + ProcessResponse(response, nil, expected, cfg, cmd, dir, daemonFile, expRspFile, diffFile, outcome) + } +} + +func BenchmarkDumpJSONs(b *testing.B) { + dir := b.TempDir() + daemonFile := filepath.Join(dir, "daemon.json") + expRspFile := filepath.Join(dir, "expected.json") + response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + metrics := &testdata.TestMetrics{} + dumpJSONs(true, daemonFile, expRspFile, dir, response, expected, metrics) + } +} diff --git a/internal/compare/comparator_test.go b/internal/compare/comparator_test.go new file mode 100644 index 00000000..7bd4e7c6 --- /dev/null +++ b/internal/compare/comparator_test.go @@ -0,0 +1,262 @@ +package compare + +import ( + "os" + "path/filepath" + "testing" + + "github.com/erigontech/rpc-tests/internal/config" + 
"github.com/erigontech/rpc-tests/internal/testdata" +) + +func TestCompareResponses_EqualMaps(t *testing.T) { + a := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + b := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + if !compareResponses(a, b) { + t.Error("identical maps should be equal") + } +} + +func TestCompareResponses_DifferentMaps(t *testing.T) { + a := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + b := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} + if compareResponses(a, b) { + t.Error("different maps should not be equal") + } +} + +func TestCompareResponses_DifferentLengths(t *testing.T) { + a := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1)} + b := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + if compareResponses(a, b) { + t.Error("maps with different lengths should not be equal") + } +} + +func TestCompareResponses_EqualArrays(t *testing.T) { + a := []map[string]interface{}{{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"}} + b := []map[string]interface{}{{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"}} + if !compareResponses(a, b) { + t.Error("identical arrays should be equal") + } +} + +func TestProcessResponse_WithoutCompare(t *testing.T) { + dir := t.TempDir() + cfg := config.NewConfig() + cfg.WithoutCompareResults = true + + outcome := &testdata.TestOutcome{} + cmd := &testdata.JsonRpcCommand{} + response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} + + ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + + if !outcome.Success { + t.Error("WithoutCompareResults should always succeed") + } +} + +func TestProcessResponse_ExactMatch(t *testing.T) { + dir := t.TempDir() + cfg := config.NewConfig() + + outcome := 
&testdata.TestOutcome{} + cmd := &testdata.JsonRpcCommand{} + response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + + ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + + if !outcome.Success { + t.Errorf("exact match should succeed, error: %v", outcome.Error) + } + if outcome.Metrics.EqualCount != 1 { + t.Errorf("EqualCount: got %d, want 1", outcome.Metrics.EqualCount) + } +} + +func TestProcessResponse_NullExpectedResult(t *testing.T) { + dir := t.TempDir() + cfg := config.NewConfig() + + outcome := &testdata.TestOutcome{} + cmd := &testdata.JsonRpcCommand{} + response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0xabc"} + expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": nil} + + ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + + if !outcome.Success { + t.Errorf("null expected result should be accepted, error: %v", outcome.Error) + } +} + +func TestProcessResponse_NullExpectedError(t *testing.T) { + dir := t.TempDir() + cfg := config.NewConfig() + + outcome := &testdata.TestOutcome{} + cmd := &testdata.JsonRpcCommand{} + response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "error": map[string]interface{}{"code": float64(-32000), "message": "some error"}} + expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "error": nil} + + ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + + if !outcome.Success { + t.Errorf("null expected error should be accepted, error: %v", outcome.Error) + } +} + +func TestProcessResponse_EmptyExpected(t *testing.T) { + dir := t.TempDir() + cfg := config.NewConfig() + + outcome := &testdata.TestOutcome{} + cmd := &testdata.JsonRpcCommand{} + response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected 
:= map[string]interface{}{"jsonrpc": "2.0", "id": float64(1)} + + ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + + if !outcome.Success { + t.Errorf("empty expected (just jsonrpc+id) should be accepted, error: %v", outcome.Error) + } +} + +func TestProcessResponse_DoNotCompareError(t *testing.T) { + dir := t.TempDir() + cfg := config.NewConfig() + cfg.DoNotCompareError = true + + outcome := &testdata.TestOutcome{} + cmd := &testdata.JsonRpcCommand{} + response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "error": map[string]interface{}{"code": float64(-32000), "message": "err1"}} + expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "error": map[string]interface{}{"code": float64(-32001), "message": "err2"}} + + ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + + if !outcome.Success { + t.Errorf("DoNotCompareError should accept different errors, error: %v", outcome.Error) + } +} + +func TestProcessResponse_DiffMismatch_JsonDiffGo(t *testing.T) { + dir := t.TempDir() + cfg := config.NewConfig() + cfg.DiffKind = config.JsonDiffGo + + daemonFile := filepath.Join(dir, "response.json") + expRspFile := filepath.Join(dir, "expected.json") + diffFile := filepath.Join(dir, "diff.json") + + outcome := &testdata.TestOutcome{} + cmd := &testdata.JsonRpcCommand{} + response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} + + ProcessResponse(response, nil, expected, cfg, cmd, dir, daemonFile, expRspFile, diffFile, outcome) + + if outcome.Success { + t.Error("mismatched responses should fail") + } + if outcome.Error == nil { + t.Error("expected ErrDiffMismatch") + } +} + +func TestProcessResponse_DiffMismatch_SingleTest_HasColoredDiff(t *testing.T) { + dir := t.TempDir() + cfg := config.NewConfig() + cfg.DiffKind = config.JsonDiffGo + cfg.ReqTestNum = 1 // single 
test mode + + daemonFile := filepath.Join(dir, "response.json") + expRspFile := filepath.Join(dir, "expected.json") + diffFile := filepath.Join(dir, "diff.json") + + outcome := &testdata.TestOutcome{} + cmd := &testdata.JsonRpcCommand{} + response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} + + ProcessResponse(response, nil, expected, cfg, cmd, dir, daemonFile, expRspFile, diffFile, outcome) + + if outcome.ColoredDiff == "" { + t.Error("single test mode should produce colored diff on mismatch") + } +} + +func TestDumpJSONs_WritesFiles(t *testing.T) { + dir := t.TempDir() + daemonFile := filepath.Join(dir, "daemon.json") + expRspFile := filepath.Join(dir, "expected.json") + metrics := &testdata.TestMetrics{} + + response := map[string]interface{}{"result": "0x1"} + expected := map[string]interface{}{"result": "0x2"} + + err := dumpJSONs(true, daemonFile, expRspFile, dir, response, expected, metrics) + if err != nil { + t.Fatalf("dumpJSONs: %v", err) + } + + if _, err := os.Stat(daemonFile); os.IsNotExist(err) { + t.Error("daemon file should be written") + } + if _, err := os.Stat(expRspFile); os.IsNotExist(err) { + t.Error("expected file should be written") + } + if metrics.MarshallingTime == 0 { + t.Error("MarshallingTime should be > 0") + } +} + +func TestDumpJSONs_SkipsWhenFalse(t *testing.T) { + dir := t.TempDir() + daemonFile := filepath.Join(dir, "daemon.json") + metrics := &testdata.TestMetrics{} + + err := dumpJSONs(false, daemonFile, "", dir, nil, nil, metrics) + if err != nil { + t.Fatalf("dumpJSONs: %v", err) + } + + if _, err := os.Stat(daemonFile); !os.IsNotExist(err) { + t.Error("daemon file should NOT be written when dump=false") + } +} + +func TestOutputFilePaths(t *testing.T) { + apiFile, dirName, diff, daemon, exp := OutputFilePaths("/output", "eth_call/test_01.json") + + if !filepath.IsAbs(apiFile) || !contains(apiFile, 
"eth_call") { + t.Errorf("apiFile: got %q", apiFile) + } + if !contains(dirName, "eth_call") { + t.Errorf("dirName: got %q", dirName) + } + if !contains(diff, "-diff.json") { + t.Errorf("diffFile: got %q", diff) + } + if !contains(daemon, "-response.json") { + t.Errorf("daemonFile: got %q", daemon) + } + if !contains(exp, "-expResponse.json") { + t.Errorf("expRspFile: got %q", exp) + } +} + +func contains(s, substr string) bool { + return len(s) > 0 && len(substr) > 0 && filepath.ToSlash(s) != "" && containsStr(s, substr) +} + +func containsStr(s, substr string) bool { + for i := 0; i+len(substr) <= len(s); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false +} diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 00000000..ee01b293 --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,321 @@ +package config + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + jsoniter "github.com/json-iterator/go" +) + +const ( + DaemonOnDefaultPort = "rpcdaemon" + DaemonOnOtherPort = "other-daemon" + ExternalProvider = "external-provider" + None = "none" + + TransportHTTP = "http" + TransportHTTPComp = "http_comp" + TransportHTTPS = "https" + TransportWebSocket = "websocket" + TransportWebSocketComp = "websocket_comp" + + DefaultServerPort = 8545 + DefaultEnginePort = 8551 + DefaultOtherPort = 51515 + DefaultOtherEnginePort = 51516 + + TempDirName = "./temp_rpc_tests" + ResultsDir = "results" +) + +// JSON is the json-iterator API used across the application for fast JSON operations. +var JSON = jsoniter.ConfigCompatibleWithStandardLibrary + +// DiffKind represents the JSON diff strategy to use. 
+type DiffKind int + +const ( + JdLibrary DiffKind = iota + JsonDiffTool + DiffTool + JsonDiffGo +) + +func (k DiffKind) String() string { + return [...]string{"jd", "json-diff", "diff", "json-diff-go"}[k] +} + +// ParseDiffKind converts a string into a DiffKind enum type. +func ParseDiffKind(s string) (DiffKind, error) { + switch strings.ToLower(s) { + case "jd": + return JdLibrary, nil + case "json-diff": + return JsonDiffTool, nil + case "diff": + return DiffTool, nil + case "json-diff-go": + return JsonDiffGo, nil + default: + return JdLibrary, fmt.Errorf("invalid DiffKind value: %s", s) + } +} + +// Config holds all configuration for the test runner. +type Config struct { + // Test execution + ExitOnFail bool + Parallel bool + LoopNumber int + StartTest string + ReqTestNum int + WaitingTime int + + // Output control + VerboseLevel int + DisplayOnlyFail bool + ForceDumpJSONs bool + DiffKind DiffKind + DoNotCompareError bool + WithoutCompareResults bool + + // Network and paths + Net string + JSONDir string + ResultsDir string + OutputDir string + + // Daemon configuration + DaemonUnderTest string + DaemonAsReference string + DaemonOnHost string + ServerPort int + EnginePort int + VerifyWithDaemon bool + ExternalProviderURL string + LocalServer string + + // Test filtering + TestingAPIs string // Exact match (-A) + TestingAPIsWith string // Pattern match (-a) + ExcludeAPIList string + ExcludeTestList string + TestsOnLatestBlock bool + + // Authentication + JWTSecret string + + // Transport + TransportType string + + // Archive handling + SanitizeArchiveExt bool + + // Profiling + CpuProfile string + MemProfile string + TraceFile string + + // Cached derived values (set by UpdateDirs) + StartTestNum int // parsed StartTest, cached for zero-alloc lookups +} + +// NewConfig creates a Config with sensible defaults matching v1 behavior. 
+func NewConfig() *Config { + return &Config{ + ExitOnFail: true, + Parallel: true, + LoopNumber: 1, + ReqTestNum: -1, + VerboseLevel: 0, + Net: "mainnet", + DaemonOnHost: "localhost", + ServerPort: 0, + EnginePort: 0, + DaemonUnderTest: DaemonOnDefaultPort, + DaemonAsReference: None, + DiffKind: JsonDiffGo, + TransportType: TransportHTTP, + ResultsDir: ResultsDir, + } +} + +// Validate checks the configuration for conflicts and invalid values. +func (c *Config) Validate() error { + if c.WaitingTime > 0 && c.Parallel { + return fmt.Errorf("waiting-time is not compatible with parallel tests") + } + if c.DaemonUnderTest == DaemonOnOtherPort && c.VerifyWithDaemon && c.DaemonAsReference == DaemonOnDefaultPort { + return fmt.Errorf("daemon-port is not compatible with compare-erigon-rpcdaemon") + } + if c.ReqTestNum != -1 && (c.ExcludeTestList != "" || c.ExcludeAPIList != "") { + return fmt.Errorf("run-test is not compatible with exclude-api-list or exclude-test-list") + } + if c.TestingAPIs != "" && c.ExcludeAPIList != "" { + return fmt.Errorf("api-list is not compatible with exclude-api-list") + } + if c.VerifyWithDaemon && c.WithoutCompareResults { + return fmt.Errorf("compare-erigon-rpcdaemon is not compatible with without-compare-results") + } + + // Validate transport types + if c.TransportType != "" { + types := strings.Split(c.TransportType, ",") + for _, t := range types { + if !IsValidTransport(t) { + return fmt.Errorf("invalid connection type: %s", t) + } + } + } + + return nil +} + +// IsValidTransport checks if a transport type string is valid. +func IsValidTransport(t string) bool { + switch t { + case TransportHTTP, TransportHTTPComp, TransportHTTPS, TransportWebSocket, TransportWebSocketComp: + return true + default: + return false + } +} + +// UpdateDirs sets derived directory paths and cached values based on current configuration. 
+func (c *Config) UpdateDirs() { + c.JSONDir = "./integration/" + c.Net + "/" + c.OutputDir = c.JSONDir + c.ResultsDir + "/" + if c.ServerPort == 0 { + c.ServerPort = DefaultServerPort + } + if c.EnginePort == 0 { + c.EnginePort = DefaultEnginePort + } + c.LocalServer = "http://" + c.DaemonOnHost + ":" + strconv.Itoa(c.ServerPort) + + // Cache parsed StartTest for zero-alloc lookups in the scheduling loop + if c.StartTest != "" { + c.StartTestNum, _ = strconv.Atoi(c.StartTest) + } +} + +// GetTarget returns the target URL for an RPC method given a daemon target type. +func (c *Config) GetTarget(targetType, method string) string { + isEngine := strings.HasPrefix(method, "engine_") + + if targetType == ExternalProvider { + return c.ExternalProviderURL + } + + if c.VerifyWithDaemon && targetType == DaemonOnOtherPort && isEngine { + return c.DaemonOnHost + ":" + strconv.Itoa(DefaultOtherEnginePort) + } + if c.VerifyWithDaemon && targetType == DaemonOnOtherPort { + return c.DaemonOnHost + ":" + strconv.Itoa(DefaultOtherPort) + } + if targetType == DaemonOnOtherPort && isEngine { + return c.DaemonOnHost + ":" + strconv.Itoa(DefaultOtherEnginePort) + } + if targetType == DaemonOnOtherPort { + return c.DaemonOnHost + ":" + strconv.Itoa(DefaultOtherPort) + } + + if isEngine { + port := c.EnginePort + if port == 0 { + port = DefaultEnginePort + } + return c.DaemonOnHost + ":" + strconv.Itoa(port) + } + + port := c.ServerPort + if port == 0 { + port = DefaultServerPort + } + return c.DaemonOnHost + ":" + strconv.Itoa(port) +} + +// GetJSONFilenameExt returns the JSON filename extension based on daemon type and target. 
+func GetJSONFilenameExt(targetType, target string) string { + parts := strings.Split(target, ":") + port := "" + if len(parts) > 1 { + port = parts[1] + } + + if targetType == DaemonOnOtherPort { + return "_" + port + "-daemon.json" + } + if targetType == ExternalProvider { + return "-external_provider_url.json" + } + return "_" + port + "-rpcdaemon.json" +} + +// ServerEndpoints returns a human-readable description of the server endpoints. +func (c *Config) ServerEndpoints() string { + if c.VerifyWithDaemon { + if c.DaemonAsReference == ExternalProvider { + return "both servers (rpcdaemon with " + c.ExternalProviderURL + ")" + } + return "both servers (rpcdaemon with " + c.DaemonUnderTest + ")" + } + target := c.GetTarget(c.DaemonUnderTest, "eth_call") + target1 := c.GetTarget(c.DaemonUnderTest, "engine_") + return target + "/" + target1 +} + +// TransportTypes returns the list of transport types as a slice. +func (c *Config) TransportTypes() []string { + return strings.Split(c.TransportType, ",") +} + +// CleanOutputDir removes and recreates the output directory. +func (c *Config) CleanOutputDir() error { + if _, err := os.Stat(c.OutputDir); err == nil { + if err := os.RemoveAll(c.OutputDir); err != nil { + return err + } + } + return os.MkdirAll(c.OutputDir, 0755) +} + +// ResultsAbsDir returns the absolute path to the results directory. +func (c *Config) ResultsAbsDir() (string, error) { + return filepath.Abs(c.ResultsDir) +} + +// GetJWTSecret reads a JWT secret from a file. +func GetJWTSecret(filename string) (string, error) { + data, err := os.ReadFile(filename) + if err != nil { + return "", err + } + contents := string(data) + if len(contents) >= 2 && contents[:2] == "0x" { + return contents[2:], nil + } + return strings.TrimSpace(contents), nil +} + +// GenerateJWTSecret creates a new JWT secret file with random hex data. 
+func GenerateJWTSecret(filename string, length int) error { + if length <= 0 { + length = 64 + } + randomBytes := make([]byte, length/2) + if _, err := rand.Read(randomBytes); err != nil { + return err + } + randomHex := "0x" + hex.EncodeToString(randomBytes) + if err := os.WriteFile(filename, []byte(randomHex), 0600); err != nil { + return err + } + fmt.Printf("Secret File '%s' created with success!\n", filename) + return nil +} diff --git a/internal/config/config_test.go b/internal/config/config_test.go new file mode 100644 index 00000000..ec847a12 --- /dev/null +++ b/internal/config/config_test.go @@ -0,0 +1,329 @@ +package config + +import ( + "os" + "path/filepath" + "testing" +) + +func TestNewConfig_Defaults(t *testing.T) { + c := NewConfig() + + if !c.ExitOnFail { + t.Error("ExitOnFail should default to true") + } + if !c.Parallel { + t.Error("Parallel should default to true") + } + if c.LoopNumber != 1 { + t.Errorf("LoopNumber: got %d, want 1", c.LoopNumber) + } + if c.ReqTestNum != -1 { + t.Errorf("ReqTestNum: got %d, want -1", c.ReqTestNum) + } + if c.Net != "mainnet" { + t.Errorf("Net: got %q, want %q", c.Net, "mainnet") + } + if c.DaemonOnHost != "localhost" { + t.Errorf("DaemonOnHost: got %q, want %q", c.DaemonOnHost, "localhost") + } + if c.DiffKind != JsonDiffGo { + t.Errorf("DiffKind: got %v, want %v", c.DiffKind, JsonDiffGo) + } + if c.TransportType != TransportHTTP { + t.Errorf("TransportType: got %q, want %q", c.TransportType, TransportHTTP) + } + if c.DaemonUnderTest != DaemonOnDefaultPort { + t.Errorf("DaemonUnderTest: got %q, want %q", c.DaemonUnderTest, DaemonOnDefaultPort) + } + if c.DaemonAsReference != None { + t.Errorf("DaemonAsReference: got %q, want %q", c.DaemonAsReference, None) + } +} + +func TestValidate_WaitingTimeParallel(t *testing.T) { + c := NewConfig() + c.WaitingTime = 100 + c.Parallel = true + if err := c.Validate(); err == nil { + t.Error("expected error for waiting-time with parallel") + } +} + +func 
TestValidate_DaemonPortWithCompare(t *testing.T) { + c := NewConfig() + c.DaemonUnderTest = DaemonOnOtherPort + c.VerifyWithDaemon = true + c.DaemonAsReference = DaemonOnDefaultPort + if err := c.Validate(); err == nil { + t.Error("expected error for daemon-port with compare") + } +} + +func TestValidate_RunTestWithExclude(t *testing.T) { + c := NewConfig() + c.ReqTestNum = 5 + c.ExcludeTestList = "1,2,3" + if err := c.Validate(); err == nil { + t.Error("expected error for run-test with exclude-test-list") + } +} + +func TestValidate_ApiListWithExcludeApi(t *testing.T) { + c := NewConfig() + c.TestingAPIs = "eth_call" + c.ExcludeAPIList = "eth_getBalance" + if err := c.Validate(); err == nil { + t.Error("expected error for api-list with exclude-api-list") + } +} + +func TestValidate_CompareWithoutCompare(t *testing.T) { + c := NewConfig() + c.VerifyWithDaemon = true + c.WithoutCompareResults = true + if err := c.Validate(); err == nil { + t.Error("expected error for compare with without-compare") + } +} + +func TestValidate_InvalidTransport(t *testing.T) { + c := NewConfig() + c.TransportType = "invalid" + if err := c.Validate(); err == nil { + t.Error("expected error for invalid transport type") + } +} + +func TestValidate_ValidConfig(t *testing.T) { + c := NewConfig() + if err := c.Validate(); err != nil { + t.Errorf("valid config should not error: %v", err) + } +} + +func TestUpdateDirs(t *testing.T) { + c := NewConfig() + c.Net = "sepolia" + c.UpdateDirs() + + if c.JSONDir != "./integration/sepolia/" { + t.Errorf("JSONDir: got %q, want %q", c.JSONDir, "./integration/sepolia/") + } + if c.OutputDir != "./integration/sepolia/results/" { + t.Errorf("OutputDir: got %q, want %q", c.OutputDir, "./integration/sepolia/results/") + } + if c.ServerPort != DefaultServerPort { + t.Errorf("ServerPort: got %d, want %d", c.ServerPort, DefaultServerPort) + } + if c.EnginePort != DefaultEnginePort { + t.Errorf("EnginePort: got %d, want %d", c.EnginePort, DefaultEnginePort) + } 
+ if c.LocalServer != "http://localhost:8545" { + t.Errorf("LocalServer: got %q, want %q", c.LocalServer, "http://localhost:8545") + } +} + +func TestUpdateDirs_CustomPorts(t *testing.T) { + c := NewConfig() + c.ServerPort = 9090 + c.EnginePort = 9091 + c.UpdateDirs() + + if c.ServerPort != 9090 { + t.Errorf("ServerPort: got %d, want 9090", c.ServerPort) + } + if c.EnginePort != 9091 { + t.Errorf("EnginePort: got %d, want 9091", c.EnginePort) + } +} + +func TestGetTarget(t *testing.T) { + c := NewConfig() + c.UpdateDirs() + + tests := []struct { + name string + targetType string + method string + want string + }{ + {"default eth_call", DaemonOnDefaultPort, "eth_call", "localhost:8545"}, + {"default engine_", DaemonOnDefaultPort, "engine_exchangeCapabilities", "localhost:8551"}, + {"other port eth", DaemonOnOtherPort, "eth_call", "localhost:51515"}, + {"other port engine", DaemonOnOtherPort, "engine_exchangeCapabilities", "localhost:51516"}, + {"external provider", ExternalProvider, "eth_call", ""}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.targetType == ExternalProvider { + c.ExternalProviderURL = "http://example.com" + tt.want = "http://example.com" + } + got := c.GetTarget(tt.targetType, tt.method) + if got != tt.want { + t.Errorf("got %q, want %q", got, tt.want) + } + }) + } +} + +func TestGetJSONFilenameExt(t *testing.T) { + tests := []struct { + targetType string + target string + want string + }{ + {DaemonOnOtherPort, "localhost:51515", "_51515-daemon.json"}, + {ExternalProvider, "http://example.com", "-external_provider_url.json"}, + {DaemonOnDefaultPort, "localhost:8545", "_8545-rpcdaemon.json"}, + } + + for _, tt := range tests { + got := GetJSONFilenameExt(tt.targetType, tt.target) + if got != tt.want { + t.Errorf("GetJSONFilenameExt(%q, %q): got %q, want %q", tt.targetType, tt.target, got, tt.want) + } + } +} + +func TestParseDiffKind(t *testing.T) { + tests := []struct { + input string + want DiffKind + err bool + 
}{ + {"jd", JdLibrary, false}, + {"json-diff", JsonDiffTool, false}, + {"diff", DiffTool, false}, + {"json-diff-go", JsonDiffGo, false}, + {"JD", JdLibrary, false}, + {"invalid", JdLibrary, true}, + } + + for _, tt := range tests { + got, err := ParseDiffKind(tt.input) + if (err != nil) != tt.err { + t.Errorf("ParseDiffKind(%q): error = %v, wantErr %v", tt.input, err, tt.err) + } + if !tt.err && got != tt.want { + t.Errorf("ParseDiffKind(%q): got %v, want %v", tt.input, got, tt.want) + } + } +} + +func TestDiffKind_String(t *testing.T) { + tests := []struct { + kind DiffKind + want string + }{ + {JdLibrary, "jd"}, + {JsonDiffTool, "json-diff"}, + {DiffTool, "diff"}, + {JsonDiffGo, "json-diff-go"}, + } + + for _, tt := range tests { + if got := tt.kind.String(); got != tt.want { + t.Errorf("DiffKind(%d).String(): got %q, want %q", tt.kind, got, tt.want) + } + } +} + +func TestIsValidTransport(t *testing.T) { + valid := []string{"http", "http_comp", "https", "websocket", "websocket_comp"} + for _, v := range valid { + if !IsValidTransport(v) { + t.Errorf("IsValidTransport(%q) should be true", v) + } + } + + invalid := []string{"tcp", "grpc", "ftp", ""} + for _, v := range invalid { + if IsValidTransport(v) { + t.Errorf("IsValidTransport(%q) should be false", v) + } + } +} + +func TestTransportTypes(t *testing.T) { + c := NewConfig() + c.TransportType = "http,websocket" + types := c.TransportTypes() + if len(types) != 2 || types[0] != "http" || types[1] != "websocket" { + t.Errorf("TransportTypes: got %v", types) + } +} + +func TestServerEndpoints(t *testing.T) { + c := NewConfig() + c.UpdateDirs() + + endpoints := c.ServerEndpoints() + if endpoints != "localhost:8545/localhost:8551" { + t.Errorf("ServerEndpoints: got %q", endpoints) + } +} + +func TestServerEndpoints_VerifyWithDaemon(t *testing.T) { + c := NewConfig() + c.UpdateDirs() + c.VerifyWithDaemon = true + c.DaemonAsReference = ExternalProvider + c.ExternalProviderURL = "http://infura.io" + + endpoints := 
c.ServerEndpoints() + want := "both servers (rpcdaemon with http://infura.io)" + if endpoints != want { + t.Errorf("ServerEndpoints: got %q, want %q", endpoints, want) + } +} + +func TestJWTSecret_RoundTrip(t *testing.T) { + tmpDir := t.TempDir() + path := filepath.Join(tmpDir, "jwt.hex") + + if err := GenerateJWTSecret(path, 64); err != nil { + t.Fatalf("GenerateJWTSecret: %v", err) + } + + secret, err := GetJWTSecret(path) + if err != nil { + t.Fatalf("GetJWTSecret: %v", err) + } + + if len(secret) != 64 { + t.Errorf("secret length: got %d, want 64", len(secret)) + } + + // Verify it's valid hex + for _, c := range secret { + if !((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f')) { + t.Errorf("secret contains non-hex char: %c", c) + } + } +} + +func TestGetJWTSecret_FileNotFound(t *testing.T) { + _, err := GetJWTSecret("/nonexistent/path") + if err == nil { + t.Error("expected error for nonexistent file") + } +} + +func TestGetJWTSecret_Without0xPrefix(t *testing.T) { + tmpDir := t.TempDir() + path := filepath.Join(tmpDir, "jwt.hex") + if err := os.WriteFile(path, []byte("abcdef1234567890"), 0600); err != nil { + t.Fatal(err) + } + + secret, err := GetJWTSecret(path) + if err != nil { + t.Fatalf("GetJWTSecret: %v", err) + } + if secret != "abcdef1234567890" { + t.Errorf("got %q, want %q", secret, "abcdef1234567890") + } +} diff --git a/internal/filter/filter.go b/internal/filter/filter.go new file mode 100644 index 00000000..50a42b6e --- /dev/null +++ b/internal/filter/filter.go @@ -0,0 +1,204 @@ +package filter + +import ( + "strconv" + "strings" +) + +// FilterConfig provides the configuration fields needed by TestFilter. +// This avoids a direct dependency on the config package. 
type FilterConfig struct {
	Net                string // chain name, used as the "<net>/" prefix on test paths
	ReqTestNum         int    // single global test number to run (-1 = all)
	TestingAPIs        string // comma-separated exact API names (-A)
	TestingAPIsWith    string // comma-separated API name substrings (-a)
	ExcludeAPIList     string // comma-separated API/test path substrings to skip
	ExcludeTestList    string // comma-separated global test numbers to skip
	TestsOnLatestBlock bool   // restrict runs to the latest-block test list
	DoNotCompareError  bool   // globally disable comparison of the "error" field
}

// TestFilter handles all test filtering logic, matching v1 behavior exactly.
// Pre-computes split lists and sets at construction time for zero-alloc lookups.
type TestFilter struct {
	cfg FilterConfig

	// Pre-split lists (computed once at construction)
	excludeAPIs     []string
	excludeTestSet  map[int]struct{} // O(1) lookup by test number
	testingAPIsList []string
	testingWithList []string
	useDefaultSkip  bool
}

// New creates a new TestFilter from the given configuration.
// Pre-splits comma-separated lists and builds lookup sets.
func New(cfg FilterConfig) *TestFilter {
	f := &TestFilter{cfg: cfg}

	if cfg.ExcludeAPIList != "" {
		f.excludeAPIs = strings.Split(cfg.ExcludeAPIList, ",")
	}

	if cfg.ExcludeTestList != "" {
		// Non-numeric entries are silently dropped (matching v1 tolerance
		// of malformed input).
		parts := strings.Split(cfg.ExcludeTestList, ",")
		f.excludeTestSet = make(map[int]struct{}, len(parts))
		for _, p := range parts {
			if n, err := strconv.Atoi(p); err == nil {
				f.excludeTestSet[n] = struct{}{}
			}
		}
	}

	if cfg.TestingAPIs != "" {
		f.testingAPIsList = strings.Split(cfg.TestingAPIs, ",")
	}

	if cfg.TestingAPIsWith != "" {
		f.testingWithList = strings.Split(cfg.TestingAPIsWith, ",")
	}

	// Default skip list applies when no specific test/API is requested and no exclude filters are set.
	// Read it as: (no single test requested, OR some API filter is present)
	// AND NOT (a single test AND an API filter are both set)
	// AND no exclude lists are configured.
	// NOTE(review): this boolean is kept structurally identical to v1 on
	// purpose; verify against v1 before attempting to simplify it.
	f.useDefaultSkip = (cfg.ReqTestNum == -1 || cfg.TestingAPIs != "" || cfg.TestingAPIsWith != "") &&
		!(cfg.ReqTestNum != -1 && (cfg.TestingAPIs != "" || cfg.TestingAPIsWith != "")) &&
		cfg.ExcludeAPIList == "" && cfg.ExcludeTestList == ""

	return f
}

// IsSkipped determines if a test should be skipped.
// This matches v1 isSkipped() exactly.
+func (f *TestFilter) IsSkipped(currAPI, testName string, globalTestNumber int) bool { + apiFullName := f.cfg.Net + "/" + currAPI + apiFullTestName := f.cfg.Net + "/" + testName + + if f.useDefaultSkip { + for _, currTestName := range apiNotCompared { + if strings.Contains(apiFullName, currTestName) { + return true + } + } + } + + for _, excludeAPI := range f.excludeAPIs { + if strings.Contains(apiFullName, excludeAPI) || strings.Contains(apiFullTestName, excludeAPI) { + return true + } + } + + if f.excludeTestSet != nil { + if _, excluded := f.excludeTestSet[globalTestNumber]; excluded { + return true + } + } + + return false +} + +// APIUnderTest determines if a test should run based on API/pattern/latest filters. +// This matches v1 apiUnderTest() exactly. +func (f *TestFilter) APIUnderTest(currAPI, testName string) bool { + if len(f.testingWithList) == 0 && len(f.testingAPIsList) == 0 && !f.cfg.TestsOnLatestBlock { + return true + } + + if len(f.testingWithList) > 0 { + for _, test := range f.testingWithList { + if strings.Contains(currAPI, test) { + if f.cfg.TestsOnLatestBlock && f.VerifyInLatestList(testName) { + return true + } + if f.cfg.TestsOnLatestBlock { + return false + } + return true + } + } + return false + } + + if len(f.testingAPIsList) > 0 { + for _, test := range f.testingAPIsList { + if test == currAPI { + if f.cfg.TestsOnLatestBlock && f.VerifyInLatestList(testName) { + return true + } + if f.cfg.TestsOnLatestBlock { + return false + } + return true + } + } + return false + } + + if f.cfg.TestsOnLatestBlock { + return f.VerifyInLatestList(testName) + } + + return false +} + +// VerifyInLatestList checks if a test is in the latest block list. +// This matches v1 verifyInLatestList() exactly. 
+func (f *TestFilter) VerifyInLatestList(testName string) bool { + apiFullTestName := f.cfg.Net + "/" + testName + if f.cfg.TestsOnLatestBlock { + for _, currTest := range testsOnLatest { + if strings.Contains(apiFullTestName, currTest) { + return true + } + } + } + return false +} + +// CheckTestNameForNumber checks if a test filename like "test_01.json" matches a requested +// test number. Zero-alloc: extracts the number after the last "_" without regex. +func CheckTestNameForNumber(testName string, reqTestNumber int) bool { + if reqTestNumber == -1 { + return true + } + idx := strings.LastIndex(testName, "_") + if idx < 0 || idx+1 >= len(testName) { + return false + } + numStr := testName[idx+1:] + end := 0 + for end < len(numStr) && numStr[end] >= '0' && numStr[end] <= '9' { + end++ + } + if end == 0 { + return false + } + n, err := strconv.Atoi(numStr[:end]) + if err != nil { + return false + } + return n == reqTestNumber +} + +// ShouldCompareMessage checks if the message field should be compared for a given test. +func (f *TestFilter) ShouldCompareMessage(testPath string) bool { + fullPath := f.cfg.Net + "/" + testPath + for _, pattern := range testsNotComparedMessage { + if pattern == fullPath { + return false + } + } + return true +} + +// ShouldCompareError checks if the error field should be compared for a given test. 
+func (f *TestFilter) ShouldCompareError(testPath string) bool { + if f.cfg.DoNotCompareError { + return false + } + fullPath := f.cfg.Net + "/" + testPath + for _, pattern := range testsNotComparedError { + if pattern == fullPath { + return false + } + } + return true +} diff --git a/internal/filter/filter_bench_test.go b/internal/filter/filter_bench_test.go new file mode 100644 index 00000000..7d02b0f8 --- /dev/null +++ b/internal/filter/filter_bench_test.go @@ -0,0 +1,59 @@ +package filter + +import "testing" + +func BenchmarkAPIUnderTest_NoFilters(b *testing.B) { + f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1}) + b.ResetTimer() + for i := 0; i < b.N; i++ { + f.APIUnderTest("eth_call", "eth_call/test_01.json") + } +} + +func BenchmarkAPIUnderTest_WithExactAPI(b *testing.B) { + f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1, TestingAPIs: "eth_call"}) + b.ResetTimer() + for i := 0; i < b.N; i++ { + f.APIUnderTest("eth_call", "eth_call/test_01.json") + } +} + +func BenchmarkAPIUnderTest_WithPattern(b *testing.B) { + f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1, TestingAPIsWith: "eth_"}) + b.ResetTimer() + for i := 0; i < b.N; i++ { + f.APIUnderTest("eth_call", "eth_call/test_01.json") + } +} + +func BenchmarkAPIUnderTest_WithExclude(b *testing.B) { + f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1, ExcludeAPIList: "eth_call,eth_getBalance,debug_traceCall"}) + b.ResetTimer() + for i := 0; i < b.N; i++ { + f.APIUnderTest("eth_getLogs", "eth_getLogs/test_01.json") + } +} + +func BenchmarkIsSkipped_DefaultList(b *testing.B) { + f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1}) + b.ResetTimer() + for i := 0; i < b.N; i++ { + f.IsSkipped("eth_call", "eth_call/test_01.json", 1) + } +} + +func BenchmarkIsSkipped_LatestBlock(b *testing.B) { + f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1, TestsOnLatestBlock: true}) + b.ResetTimer() + for i := 0; i < b.N; i++ { + f.IsSkipped("eth_call", "eth_call/test_01.json", 1) + } +} + +func 
BenchmarkVerifyInLatestList(b *testing.B) { + f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1}) + b.ResetTimer() + for i := 0; i < b.N; i++ { + f.VerifyInLatestList("eth_getBlockByNumber/test_01.json") + } +} diff --git a/internal/filter/filter_test.go b/internal/filter/filter_test.go new file mode 100644 index 00000000..b471e464 --- /dev/null +++ b/internal/filter/filter_test.go @@ -0,0 +1,262 @@ +package filter + +import ( + "testing" +) + +func defaultCfg() FilterConfig { + return FilterConfig{ + Net: "mainnet", + ReqTestNum: -1, + } +} + +func TestIsSkipped_DefaultList(t *testing.T) { + f := New(defaultCfg()) + + // engine_ APIs should be skipped by default + if !f.IsSkipped("engine_getClientVersionV1", "engine_getClientVersionV1/test_01.json", 1) { + t.Error("engine_getClientVersionV1 should be skipped by default") + } + if !f.IsSkipped("engine_exchangeCapabilities", "engine_exchangeCapabilities/test_01.json", 2) { + t.Error("engine_ APIs should be skipped by default") + } + if !f.IsSkipped("trace_rawTransaction", "trace_rawTransaction/test_01.json", 3) { + t.Error("trace_rawTransaction should be skipped by default") + } + + // Normal API should not be skipped + if f.IsSkipped("eth_call", "eth_call/test_01.json", 10) { + t.Error("eth_call should not be skipped by default") + } +} + +func TestIsSkipped_DefaultListDisabledByExcludeAPI(t *testing.T) { + cfg := defaultCfg() + cfg.ExcludeAPIList = "eth_getLogs" + f := New(cfg) + + // When ExcludeAPIList is set, the default skip list is NOT applied + if f.IsSkipped("engine_getClientVersionV1", "engine_getClientVersionV1/test_01.json", 1) { + t.Error("default skip list should be disabled when ExcludeAPIList is set") + } + + // But the explicit exclude should work + if !f.IsSkipped("eth_getLogs", "eth_getLogs/test_01.json", 10) { + t.Error("eth_getLogs should be excluded by ExcludeAPIList") + } +} + +func TestIsSkipped_ExcludeTestList(t *testing.T) { + cfg := defaultCfg() + cfg.ExcludeTestList = "5,10,15" + f := 
New(cfg) + + if !f.IsSkipped("eth_call", "eth_call/test_01.json", 5) { + t.Error("test 5 should be excluded") + } + if !f.IsSkipped("eth_call", "eth_call/test_01.json", 10) { + t.Error("test 10 should be excluded") + } + if f.IsSkipped("eth_call", "eth_call/test_01.json", 7) { + t.Error("test 7 should not be excluded") + } +} + +func TestIsSkipped_ExcludeAPIPattern(t *testing.T) { + cfg := defaultCfg() + cfg.ExcludeAPIList = "eth_getLogs/test_01,trace_rawTransaction" + f := New(cfg) + + if !f.IsSkipped("eth_getLogs", "eth_getLogs/test_01.json", 1) { + t.Error("eth_getLogs/test_01 should be excluded") + } + if f.IsSkipped("eth_getLogs", "eth_getLogs/test_02.json", 2) { + t.Error("eth_getLogs/test_02 should not be excluded") + } + if !f.IsSkipped("trace_rawTransaction", "trace_rawTransaction/test_01.json", 3) { + t.Error("trace_rawTransaction should be excluded") + } +} + +func TestIsSkipped_DefaultListDisabledByReqTestAndAPI(t *testing.T) { + // When both ReqTestNum and TestingAPIs are set, the v1 condition evaluates to false + // so the default skip list is NOT applied (the XOR-like condition excludes this combo) + cfg := defaultCfg() + cfg.ReqTestNum = 5 + cfg.TestingAPIs = "engine_getClientVersionV1" + f := New(cfg) + + if f.IsSkipped("engine_getClientVersionV1", "engine_getClientVersionV1/test_01.json", 5) { + t.Error("default skip list should NOT apply when both ReqTestNum and TestingAPIs are set") + } +} + +func TestAPIUnderTest_NoFilters(t *testing.T) { + f := New(defaultCfg()) + + if !f.APIUnderTest("eth_call", "eth_call/test_01.json") { + t.Error("with no filters, all APIs should be under test") + } +} + +func TestAPIUnderTest_ExactAPI(t *testing.T) { + cfg := defaultCfg() + cfg.TestingAPIs = "eth_call" + f := New(cfg) + + if !f.APIUnderTest("eth_call", "eth_call/test_01.json") { + t.Error("eth_call should match exact API filter") + } + if f.APIUnderTest("eth_getBalance", "eth_getBalance/test_01.json") { + t.Error("eth_getBalance should not match exact API 
filter for eth_call") + } +} + +func TestAPIUnderTest_MultipleExactAPIs(t *testing.T) { + cfg := defaultCfg() + cfg.TestingAPIs = "eth_call,eth_getBalance" + f := New(cfg) + + if !f.APIUnderTest("eth_call", "eth_call/test_01.json") { + t.Error("eth_call should match") + } + if !f.APIUnderTest("eth_getBalance", "eth_getBalance/test_01.json") { + t.Error("eth_getBalance should match") + } + if f.APIUnderTest("eth_getCode", "eth_getCode/test_01.json") { + t.Error("eth_getCode should not match") + } +} + +func TestAPIUnderTest_PatternAPI(t *testing.T) { + cfg := defaultCfg() + cfg.TestingAPIsWith = "eth_" + f := New(cfg) + + if !f.APIUnderTest("eth_call", "eth_call/test_01.json") { + t.Error("eth_call should match pattern eth_") + } + if !f.APIUnderTest("eth_getBalance", "eth_getBalance/test_01.json") { + t.Error("eth_getBalance should match pattern eth_") + } + if f.APIUnderTest("trace_call", "trace_call/test_01.json") { + t.Error("trace_call should not match pattern eth_") + } +} + +func TestAPIUnderTest_LatestBlock(t *testing.T) { + cfg := defaultCfg() + cfg.TestsOnLatestBlock = true + f := New(cfg) + + if !f.APIUnderTest("eth_blockNumber", "eth_blockNumber/test_01.json") { + t.Error("eth_blockNumber is on latest list") + } + if f.APIUnderTest("eth_call", "eth_call/test_01.json") { + t.Error("eth_call/test_01 is NOT on latest list") + } + if !f.APIUnderTest("eth_call", "eth_call/test_20.json") { + t.Error("eth_call/test_20 IS on latest list") + } +} + +func TestAPIUnderTest_PatternWithLatest(t *testing.T) { + cfg := defaultCfg() + cfg.TestingAPIsWith = "eth_call" + cfg.TestsOnLatestBlock = true + f := New(cfg) + + // eth_call/test_20.json is on the latest list + if !f.APIUnderTest("eth_call", "eth_call/test_20.json") { + t.Error("eth_call/test_20 matches pattern and is on latest list") + } + // eth_call/test_01.json is NOT on the latest list + if f.APIUnderTest("eth_call", "eth_call/test_01.json") { + t.Error("eth_call/test_01 matches pattern but is NOT on latest 
list") + } +} + +func TestVerifyInLatestList(t *testing.T) { + cfg := defaultCfg() + cfg.TestsOnLatestBlock = true + f := New(cfg) + + if !f.VerifyInLatestList("eth_blockNumber/test_01.json") { + t.Error("eth_blockNumber should be in latest list") + } + if !f.VerifyInLatestList("eth_gasPrice/test_01.json") { + t.Error("eth_gasPrice should be in latest list") + } + if f.VerifyInLatestList("eth_call/test_01.json") { + t.Error("eth_call/test_01 should NOT be in latest list") + } +} + +func TestVerifyInLatestList_FlagOff(t *testing.T) { + cfg := defaultCfg() + cfg.TestsOnLatestBlock = false + f := New(cfg) + + if f.VerifyInLatestList("eth_blockNumber/test_01.json") { + t.Error("should return false when flag is off") + } +} + +func TestCheckTestNameForNumber(t *testing.T) { + tests := []struct { + name string + num int + expect bool + }{ + {"test_01.json", 1, true}, + {"test_01.json", 2, false}, + {"test_10.json", 10, true}, + {"test_10.json", 1, false}, + {"test_001.json", 1, true}, + {"test_100.json", 10, false}, + {"test_100.json", 100, true}, + {"test_01.tar", 1, true}, + {"any_name", -1, true}, + } + + for _, tt := range tests { + got := CheckTestNameForNumber(tt.name, tt.num) + if got != tt.expect { + t.Errorf("CheckTestNameForNumber(%q, %d): got %v, want %v", tt.name, tt.num, got, tt.expect) + } + } +} + +func TestShouldCompareError_GlobalFlag(t *testing.T) { + cfg := defaultCfg() + cfg.DoNotCompareError = true + f := New(cfg) + + if f.ShouldCompareError("eth_call/test_01.json") { + t.Error("should not compare error when global flag is set") + } +} + +func TestShouldCompareError_Default(t *testing.T) { + f := New(defaultCfg()) + + if !f.ShouldCompareError("eth_call/test_01.json") { + t.Error("should compare error by default") + } +} + +func TestShouldCompareMessage_Default(t *testing.T) { + f := New(defaultCfg()) + + if !f.ShouldCompareMessage("eth_call/test_01.json") { + t.Error("should compare message by default") + } +} + +func TestTestsOnLatestList_Count(t 
*testing.T) { + // Verify the list has the expected number of entries from v1 + if len(testsOnLatest) < 100 { + t.Errorf("testsOnLatest has %d entries, expected at least 100", len(testsOnLatest)) + } +} diff --git a/internal/filter/lists.go b/internal/filter/lists.go new file mode 100644 index 00000000..36001118 --- /dev/null +++ b/internal/filter/lists.go @@ -0,0 +1,136 @@ +package filter + +// apiNotCompared contains API paths that are skipped by default (when no explicit filters are set). +var apiNotCompared = []string{ + "mainnet/engine_getClientVersionV1", + "mainnet/trace_rawTransaction", + "mainnet/engine_", +} + +// testsOnLatest contains tests that operate on the latest block. +// These are only run when the -L flag is set. +var testsOnLatest = []string{ + "mainnet/debug_traceBlockByNumber/test_24.json", + "mainnet/debug_traceBlockByNumber/test_30.json", + "mainnet/debug_traceCall/test_22.json", + "mainnet/debug_traceCall/test_33.json", + "mainnet/debug_traceCall/test_34.json", + "mainnet/debug_traceCall/test_35.json", + "mainnet/debug_traceCall/test_36.json", + "mainnet/debug_traceCall/test_37.json", + "mainnet/debug_traceCall/test_38.json", + "mainnet/debug_traceCall/test_39.json", + "mainnet/debug_traceCall/test_40.json", + "mainnet/debug_traceCall/test_41.json", + "mainnet/debug_traceCall/test_42.json", + "mainnet/debug_traceCall/test_43.json", + "mainnet/debug_traceCallMany/test_11.json", + "mainnet/debug_traceCallMany/test_12.json", + "mainnet/eth_blobBaseFee", + "mainnet/eth_blockNumber", + "mainnet/eth_call/test_20.json", + "mainnet/eth_call/test_28.json", + "mainnet/eth_call/test_29.json", + "mainnet/eth_call/test_36.json", + "mainnet/eth_call/test_37.json", + "mainnet/eth_callBundle/test_09.json", + "mainnet/eth_createAccessList/test_18.json", + "mainnet/eth_createAccessList/test_19.json", + "mainnet/eth_createAccessList/test_20.json", + "mainnet/eth_createAccessList/test_22.json", + "mainnet/eth_estimateGas/test_01", + 
"mainnet/eth_estimateGas/test_02", + "mainnet/eth_estimateGas/test_03", + "mainnet/eth_estimateGas/test_04", + "mainnet/eth_estimateGas/test_05", + "mainnet/eth_estimateGas/test_06", + "mainnet/eth_estimateGas/test_07", + "mainnet/eth_estimateGas/test_08", + "mainnet/eth_estimateGas/test_09", + "mainnet/eth_estimateGas/test_10", + "mainnet/eth_estimateGas/test_11", + "mainnet/eth_estimateGas/test_12", + "mainnet/eth_estimateGas/test_21", + "mainnet/eth_estimateGas/test_22", + "mainnet/eth_estimateGas/test_23", + "mainnet/eth_estimateGas/test_27", + "mainnet/eth_feeHistory/test_07.json", + "mainnet/eth_feeHistory/test_22.json", + "mainnet/eth_gasPrice", + "mainnet/eth_getBalance/test_03.json", + "mainnet/eth_getBalance/test_26.json", + "mainnet/eth_getBalance/test_27.json", + "mainnet/eth_getBlockTransactionCountByNumber/test_03.json", + "mainnet/eth_getBlockByNumber/test_10.json", + "mainnet/eth_getBlockByNumber/test_27.json", + "mainnet/eth_getBlockReceipts/test_07.json", + "mainnet/eth_getCode/test_05.json", + "mainnet/eth_getCode/test_06.json", + "mainnet/eth_getCode/test_07.json", + "mainnet/eth_getLogs/test_21.json", + "mainnet/eth_getProof/test_01.json", + "mainnet/eth_getProof/test_02.json", + "mainnet/eth_getProof/test_03.json", + "mainnet/eth_getProof/test_04.json", + "mainnet/eth_getProof/test_05.json", + "mainnet/eth_getProof/test_06.json", + "mainnet/eth_getProof/test_07.json", + "mainnet/eth_getProof/test_08.json", + "mainnet/eth_getProof/test_09.json", + "mainnet/eth_getProof/test_10.json", + "mainnet/eth_getProof/test_11.json", + "mainnet/eth_getProof/test_12.json", + "mainnet/eth_getProof/test_13.json", + "mainnet/eth_getProof/test_14.json", + "mainnet/eth_getProof/test_15.json", + "mainnet/eth_getProof/test_16.json", + "mainnet/eth_getProof/test_17.json", + "mainnet/eth_getProof/test_18.json", + "mainnet/eth_getProof/test_19.json", + "mainnet/eth_getProof/test_20.json", + "mainnet/eth_getRawTransactionByBlockNumberAndIndex/test_11.json", + 
"mainnet/eth_getRawTransactionByBlockNumberAndIndex/test_12.json", + "mainnet/eth_getRawTransactionByBlockNumberAndIndex/test_13.json", + "mainnet/eth_getStorageAt/test_04.json", + "mainnet/eth_getStorageAt/test_07.json", + "mainnet/eth_getStorageAt/test_08.json", + "mainnet/eth_getTransactionByBlockNumberAndIndex/test_02.json", + "mainnet/eth_getTransactionByBlockNumberAndIndex/test_08.json", + "mainnet/eth_getTransactionByBlockNumberAndIndex/test_09.json", + "mainnet/eth_getTransactionCount/test_02.json", + "mainnet/eth_getTransactionCount/test_07.json", + "mainnet/eth_getTransactionCount/test_08.json", + "mainnet/eth_getUncleCountByBlockNumber/test_03.json", + "mainnet/eth_getUncleByBlockNumberAndIndex/test_02.json", + "mainnet/eth_maxPriorityFeePerGas", + "mainnet/eth_simulateV1/test_04.json", + "mainnet/eth_simulateV1/test_05.json", + "mainnet/eth_simulateV1/test_06.json", + "mainnet/eth_simulateV1/test_07.json", + "mainnet/eth_simulateV1/test_12.json", + "mainnet/eth_simulateV1/test_13.json", + "mainnet/eth_simulateV1/test_14.json", + "mainnet/eth_simulateV1/test_15.json", + "mainnet/eth_simulateV1/test_16.json", + "mainnet/eth_simulateV1/test_25.json", + "mainnet/eth_simulateV1/test_27.json", + "mainnet/erigon_blockNumber/test_4.json", + "mainnet/erigon_blockNumber/test_6.json", + "mainnet/ots_hasCode/test_10.json", + "mainnet/ots_searchTransactionsBefore/test_02.json", + "mainnet/parity_listStorageKeys", + "mainnet/trace_block/test_25.json", + "mainnet/trace_call/test_26.json", + "mainnet/trace_call/test_27.json", + "mainnet/trace_call/test_28.json", + "mainnet/trace_call/test_29.json", + "mainnet/trace_callMany/test_15.json", + "mainnet/trace_filter/test_25.json", + "mainnet/trace_replayBlockTransactions/test_36.json", +} + +// testsNotComparedMessage contains tests where the "message" field is not compared. +var testsNotComparedMessage = []string{} + +// testsNotComparedError contains tests where the "error" field is not compared. 
var testsNotComparedError = []string{}

// ---- internal/perf/config.go (new file in patch, mode 100644, index 161aad45) ----
// package perf
// imports: fmt, os, os/user, time

// Defaults for the performance-test configuration. The test sequence is a
// comma-separated list of "QPS:durationSeconds" pairs.
const (
	DefaultTestSequence          = "50:30,1000:30,2500:20,10000:20"
	DefaultRepetitions           = 10
	DefaultVegetaPatternTarFile  = ""
	DefaultClientVegetaOnCore    = "-:-"
	DefaultServerAddress         = "localhost"
	DefaultWaitingTime           = 5
	DefaultMaxConn               = "9000"
	DefaultTestType              = "eth_getLogs"
	DefaultVegetaResponseTimeout = "300s"
	DefaultMaxBodyRsp            = "1500"
	DefaultClientName            = "rpcdaemon"
	DefaultClientBuildDir        = ""

	BinaryDir = "bin"
)

// Config holds all configuration for the performance test.
type Config struct {
	VegetaPatternTarFile   string
	ClientVegetaOnCore     string
	ClientBuildDir         string
	Repetitions            int
	TestSequence           string
	ClientAddress          string
	TestType               string
	TestingClient          string
	WaitingTime            int
	VersionedTestReport    bool
	Verbose                bool
	MacConnection          bool
	CheckServerAlive       bool
	Tracing                bool
	EmptyCache             bool
	CreateTestReport       bool
	MaxConnection          string
	VegetaResponseTimeout  string
	MaxBodyRsp             string
	JSONReportFile         string
	BinaryFileFullPathname string
	BinaryFile             string
	ChainName              string
	MorePercentiles        bool
	InstantReport          bool
	HaltOnVegetaError      bool
	DisableHttpCompression bool
}

// NewConfig creates a new Config with default values. Fields not listed here
// intentionally start at their Go zero values (false / empty string), which
// matches the documented defaults.
func NewConfig() *Config {
	return &Config{
		VegetaPatternTarFile:  DefaultVegetaPatternTarFile,
		ClientVegetaOnCore:    DefaultClientVegetaOnCore,
		ClientBuildDir:        DefaultClientBuildDir,
		Repetitions:           DefaultRepetitions,
		TestSequence:          DefaultTestSequence,
		ClientAddress:         DefaultServerAddress,
		TestType:              DefaultTestType,
		TestingClient:         DefaultClientName,
		WaitingTime:           DefaultWaitingTime,
		CheckServerAlive:      true,
		MaxConnection:         DefaultMaxConn,
		VegetaResponseTimeout: DefaultVegetaResponseTimeout,
		MaxBodyRsp:            DefaultMaxBodyRsp,
		ChainName:             "mainnet",
	}
}

// Validate checks the configuration for conflicts and invalid values.
// It returns an error when the JSON report option is used without naming the
// client under test, when the client build directory cannot be accessed, or
// when cache flushing is requested by a non-root user.
func (c *Config) Validate() error {
	if c.JSONReportFile != "" && c.TestingClient == "" {
		return fmt.Errorf("with json-report must also set testing-client")
	}

	if c.ClientBuildDir != "" {
		// Any stat failure (not only "does not exist") makes the directory
		// unusable, so surface it instead of silently accepting the path.
		if _, err := os.Stat(c.ClientBuildDir); err != nil {
			return fmt.Errorf("client build dir not specified correctly: %s: %w", c.ClientBuildDir, err)
		}
	}

	if c.EmptyCache {
		currentUser, err := user.Current()
		if err != nil {
			return fmt.Errorf("failed to get current user: %w", err)
		}
		// NOTE(review): the username comparison assumes the effective user is
		// literally named "root"; os.Geteuid() == 0 would be more robust —
		// confirm before changing, as it alters behavior on exotic setups.
		if currentUser.Username != "root" {
			return fmt.Errorf("empty-cache option can only be used by root")
		}
	}

	return nil
}

// RunDirs holds the temporary directory paths used during a perf run.
type RunDirs struct {
	RunTestDir  string
	PatternDir  string
	ReportFile  string
	TarFileName string
	PatternBase string
}

// NewRunDirs creates a new set of run directories based on a timestamp.
+func NewRunDirs() *RunDirs { + timestamp := time.Now().UnixNano() + runTestDir := fmt.Sprintf("/tmp/run_tests_%d", timestamp) + return &RunDirs{ + RunTestDir: runTestDir, + PatternDir: runTestDir + "/erigon_stress_test", + ReportFile: runTestDir + "/vegeta_report.hrd", + TarFileName: runTestDir + "/vegeta_TAR_File", + PatternBase: runTestDir + "/erigon_stress_test/vegeta_erigon_", + } +} diff --git a/internal/perf/hardware.go b/internal/perf/hardware.go new file mode 100644 index 00000000..9fa5c9a5 --- /dev/null +++ b/internal/perf/hardware.go @@ -0,0 +1,201 @@ +package perf + +import ( + "fmt" + "os" + "os/exec" + "runtime" + "strings" +) + +// Hardware provides methods to extract hardware information. +type Hardware struct{} + +// Vendor returns the system vendor. +func (h *Hardware) Vendor() string { + if runtime.GOOS != "linux" { + return "unknown" + } + data, err := os.ReadFile("/sys/devices/virtual/dmi/id/sys_vendor") + if err != nil { + return "unknown" + } + return strings.TrimSpace(string(data)) +} + +// NormalizedVendor returns the system vendor as a lowercase first token. +func (h *Hardware) NormalizedVendor() string { + vendor := h.Vendor() + parts := strings.Split(vendor, " ") + if len(parts) > 0 { + return strings.ToLower(parts[0]) + } + return "unknown" +} + +// Product returns the system product name. +func (h *Hardware) Product() string { + if runtime.GOOS != "linux" { + return "unknown" + } + data, err := os.ReadFile("/sys/devices/virtual/dmi/id/product_name") + if err != nil { + return "unknown" + } + return strings.TrimSpace(string(data)) +} + +// Board returns the system board name. +func (h *Hardware) Board() string { + if runtime.GOOS != "linux" { + return "unknown" + } + data, err := os.ReadFile("/sys/devices/virtual/dmi/id/board_name") + if err != nil { + return "unknown" + } + return strings.TrimSpace(string(data)) +} + +// NormalizedProduct returns the system product name as lowercase without whitespaces. 
+func (h *Hardware) NormalizedProduct() string { + product := h.Product() + return strings.ToLower(strings.ReplaceAll(product, " ", "")) +} + +// NormalizedBoard returns the board name as a lowercase name without whitespaces. +func (h *Hardware) NormalizedBoard() string { + board := h.Board() + parts := strings.Split(board, "/") + if len(parts) > 0 { + return strings.ToLower(strings.ReplaceAll(parts[0], " ", "")) + } + return "unknown" +} + +// GetCPUModel returns the CPU model information. +func (h *Hardware) GetCPUModel() string { + if runtime.GOOS != "linux" { + return "unknown" + } + cmd := exec.Command("sh", "-c", "cat /proc/cpuinfo | grep 'model name' | uniq") + output, err := cmd.Output() + if err != nil { + return "unknown" + } + parts := strings.Split(string(output), ":") + if len(parts) > 1 { + return strings.TrimSpace(parts[1]) + } + return "unknown" +} + +// GetBogomips returns the bogomips value. +func (h *Hardware) GetBogomips() string { + if runtime.GOOS != "linux" { + return "unknown" + } + cmd := exec.Command("sh", "-c", "cat /proc/cpuinfo | grep 'bogomips' | uniq") + output, err := cmd.Output() + if err != nil { + return "unknown" + } + parts := strings.Split(string(output), ":") + if len(parts) > 1 { + return strings.TrimSpace(parts[1]) + } + return "unknown" +} + +// GetKernelVersion returns the kernel version. +func GetKernelVersion() string { + cmd := exec.Command("uname", "-r") + output, err := cmd.Output() + if err != nil { + return "unknown" + } + return strings.TrimSpace(string(output)) +} + +// GetGCCVersion returns the GCC version. +func GetGCCVersion() string { + cmd := exec.Command("gcc", "--version") + output, err := cmd.Output() + if err != nil { + return "unknown" + } + lines := strings.Split(string(output), "\n") + if len(lines) > 0 { + return strings.TrimSpace(lines[0]) + } + return "unknown" +} + +// GetGoVersion returns the Go version. 
+func GetGoVersion() string { + cmd := exec.Command("go", "version") + output, err := cmd.Output() + if err != nil { + return "unknown" + } + return strings.TrimSpace(string(output)) +} + +// GetGitCommit returns the git commit hash for a directory. +func GetGitCommit(dir string) string { + if dir == "" { + return "" + } + cmd := exec.Command("git", "rev-parse", "HEAD") + cmd.Dir = dir + output, err := cmd.Output() + if err != nil { + return "" + } + return strings.TrimSpace(string(output)) +} + +// GetFileChecksum returns the checksum of a file. +func GetFileChecksum(filepath string) string { + cmd := exec.Command("sum", filepath) + output, err := cmd.Output() + if err != nil { + return "" + } + parts := strings.Split(string(output), " ") + if len(parts) > 0 { + return parts[0] + } + return "" +} + +// IsProcessRunning checks if a process with the given name is running. +func IsProcessRunning(processName string) bool { + cmd := exec.Command("pgrep", "-x", processName) + out, err := cmd.Output() + return err == nil && len(out) > 0 +} + +// EmptyOSCache drops OS caches (requires root on Linux, purge on macOS). 
+func EmptyOSCache() error { + switch runtime.GOOS { + case "linux": + if err := exec.Command("sync").Run(); err != nil { + return fmt.Errorf("sync failed: %w", err) + } + cmd := exec.Command("sh", "-c", "echo 3 > /proc/sys/vm/drop_caches") + if err := cmd.Run(); err != nil { + return fmt.Errorf("cache purge failed: %w", err) + } + case "darwin": + if err := exec.Command("sync").Run(); err != nil { + return fmt.Errorf("sync failed: %w", err) + } + if err := exec.Command("purge").Run(); err != nil { + return fmt.Errorf("cache purge failed: %w", err) + } + default: + return fmt.Errorf("unsupported OS: %s", runtime.GOOS) + } + return nil +} diff --git a/internal/perf/perf_bench_test.go b/internal/perf/perf_bench_test.go new file mode 100644 index 00000000..a465e43f --- /dev/null +++ b/internal/perf/perf_bench_test.go @@ -0,0 +1,53 @@ +package perf + +import ( + "testing" + "time" +) + +func BenchmarkParseTestSequence(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + ParseTestSequence(DefaultTestSequence) + } +} + +func BenchmarkFormatDuration_Microseconds(b *testing.B) { + d := 500 * time.Microsecond + b.ResetTimer() + for i := 0; i < b.N; i++ { + FormatDuration(d) + } +} + +func BenchmarkFormatDuration_Milliseconds(b *testing.B) { + d := 150 * time.Millisecond + b.ResetTimer() + for i := 0; i < b.N; i++ { + FormatDuration(d) + } +} + +func BenchmarkFormatDuration_Seconds(b *testing.B) { + d := 2500 * time.Millisecond + b.ResetTimer() + for i := 0; i < b.N; i++ { + FormatDuration(d) + } +} + +func BenchmarkCountDigits(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + CountDigits(10000) + } +} + +func BenchmarkGetCompressionType(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + getCompressionType("test.tar.gz") + getCompressionType("test.tar.bz2") + getCompressionType("test.tar") + } +} diff --git a/internal/perf/perf_test.go b/internal/perf/perf_test.go new file mode 100644 index 00000000..d7b4db6c --- /dev/null +++ 
b/internal/perf/perf_test.go @@ -0,0 +1,262 @@ +package perf + +import ( + "testing" + "time" +) + +func TestParseTestSequence_Valid(t *testing.T) { + seq, err := ParseTestSequence("50:30,1000:30,2500:20") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(seq) != 3 { + t.Fatalf("expected 3 items, got %d", len(seq)) + } + if seq[0].QPS != 50 || seq[0].Duration != 30 { + t.Errorf("item 0: got QPS=%d Duration=%d, want 50:30", seq[0].QPS, seq[0].Duration) + } + if seq[1].QPS != 1000 || seq[1].Duration != 30 { + t.Errorf("item 1: got QPS=%d Duration=%d, want 1000:30", seq[1].QPS, seq[1].Duration) + } + if seq[2].QPS != 2500 || seq[2].Duration != 20 { + t.Errorf("item 2: got QPS=%d Duration=%d, want 2500:20", seq[2].QPS, seq[2].Duration) + } +} + +func TestParseTestSequence_Default(t *testing.T) { + seq, err := ParseTestSequence(DefaultTestSequence) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(seq) != 4 { + t.Fatalf("expected 4 items, got %d", len(seq)) + } +} + +func TestParseTestSequence_InvalidFormat(t *testing.T) { + _, err := ParseTestSequence("50:30,invalid") + if err == nil { + t.Error("expected error for invalid format") + } +} + +func TestParseTestSequence_InvalidQPS(t *testing.T) { + _, err := ParseTestSequence("abc:30") + if err == nil { + t.Error("expected error for invalid QPS") + } +} + +func TestParseTestSequence_InvalidDuration(t *testing.T) { + _, err := ParseTestSequence("50:abc") + if err == nil { + t.Error("expected error for invalid duration") + } +} + +func TestFormatDuration_Microseconds(t *testing.T) { + d := 500 * time.Microsecond + got := FormatDuration(d) + if got != "500µs" { + t.Errorf("FormatDuration(%v): got %q, want %q", d, got, "500µs") + } +} + +func TestFormatDuration_Milliseconds(t *testing.T) { + d := 150 * time.Millisecond + got := FormatDuration(d) + if got != "150.00ms" { + t.Errorf("FormatDuration(%v): got %q, want %q", d, got, "150.00ms") + } +} + +func TestFormatDuration_Seconds(t 
*testing.T) { + d := 2500 * time.Millisecond + got := FormatDuration(d) + if got != "2.50s" { + t.Errorf("FormatDuration(%v): got %q, want %q", d, got, "2.50s") + } +} + +func TestCountDigits(t *testing.T) { + tests := []struct { + n int + want int + }{ + {0, 1}, + {1, 1}, + {9, 1}, + {10, 2}, + {99, 2}, + {100, 3}, + {1000, 4}, + {10000, 5}, + } + for _, tt := range tests { + got := CountDigits(tt.n) + if got != tt.want { + t.Errorf("CountDigits(%d): got %d, want %d", tt.n, got, tt.want) + } + } +} + +func TestMaxQpsAndDurationDigits(t *testing.T) { + seq := TestSequence{ + {QPS: 50, Duration: 30}, + {QPS: 10000, Duration: 20}, + {QPS: 100, Duration: 5}, + } + maxQps, maxDur := MaxQpsAndDurationDigits(seq) + if maxQps != 5 { + t.Errorf("maxQpsDigits: got %d, want 5", maxQps) + } + if maxDur != 2 { + t.Errorf("maxDurationDigits: got %d, want 2", maxDur) + } +} + +func TestNewConfig_Defaults(t *testing.T) { + cfg := NewConfig() + if cfg.Repetitions != DefaultRepetitions { + t.Errorf("Repetitions: got %d, want %d", cfg.Repetitions, DefaultRepetitions) + } + if cfg.TestSequence != DefaultTestSequence { + t.Errorf("TestSequence: got %q, want %q", cfg.TestSequence, DefaultTestSequence) + } + if cfg.ClientAddress != DefaultServerAddress { + t.Errorf("ClientAddress: got %q, want %q", cfg.ClientAddress, DefaultServerAddress) + } + if cfg.TestType != DefaultTestType { + t.Errorf("TestType: got %q, want %q", cfg.TestType, DefaultTestType) + } + if cfg.MaxConnection != DefaultMaxConn { + t.Errorf("MaxConnection: got %q, want %q", cfg.MaxConnection, DefaultMaxConn) + } + if !cfg.CheckServerAlive { + t.Error("CheckServerAlive should be true by default") + } + if cfg.ChainName != "mainnet" { + t.Errorf("ChainName: got %q, want %q", cfg.ChainName, "mainnet") + } +} + +func TestConfig_Validate_JSONReportWithoutClient(t *testing.T) { + cfg := NewConfig() + cfg.JSONReportFile = "report.json" + cfg.TestingClient = "" + if err := cfg.Validate(); err == nil { + t.Error("expected error 
when JSONReportFile set without TestingClient") + } +} + +func TestConfig_Validate_NonExistentBuildDir(t *testing.T) { + cfg := NewConfig() + cfg.ClientBuildDir = "/nonexistent/path/that/does/not/exist" + if err := cfg.Validate(); err == nil { + t.Error("expected error for nonexistent ClientBuildDir") + } +} + +func TestConfig_Validate_OK(t *testing.T) { + cfg := NewConfig() + if err := cfg.Validate(); err != nil { + t.Errorf("unexpected validation error: %v", err) + } +} + +func TestNewRunDirs(t *testing.T) { + dirs := NewRunDirs() + if dirs.RunTestDir == "" { + t.Error("RunTestDir should not be empty") + } + if dirs.PatternDir == "" { + t.Error("PatternDir should not be empty") + } + if dirs.TarFileName == "" { + t.Error("TarFileName should not be empty") + } + if dirs.PatternBase == "" { + t.Error("PatternBase should not be empty") + } +} + +func TestParseLatency(t *testing.T) { + tests := []struct { + input string + want string + }{ + {"500µs", "500us"}, + {" 150ms ", "150ms"}, + {"2.5s", "2.5s"}, + } + for _, tt := range tests { + got := ParseLatency(tt.input) + if got != tt.want { + t.Errorf("ParseLatency(%q): got %q, want %q", tt.input, got, tt.want) + } + } +} + +func TestGetCompressionType(t *testing.T) { + tests := []struct { + filename string + want string + }{ + {"test.tar.gz", GzipCompression}, + {"test.tgz", GzipCompression}, + {"test.tar.bz2", Bzip2Compression}, + {"test.tbz", Bzip2Compression}, + {"test.tar", NoCompression}, + {"test.json", NoCompression}, + } + for _, tt := range tests { + got := getCompressionType(tt.filename) + if got != tt.want { + t.Errorf("getCompressionType(%q): got %q, want %q", tt.filename, got, tt.want) + } + } +} + +func TestHardware_NonLinux(t *testing.T) { + h := &Hardware{} + // On macOS (darwin), all Linux-specific methods return "unknown" + if h.Vendor() != "unknown" && h.Vendor() != "" { + // On Linux, this would return actual vendor. On macOS, "unknown". + // Just make sure it doesn't panic. 
+ } + _ = h.NormalizedVendor() + _ = h.Product() + _ = h.Board() + _ = h.NormalizedProduct() + _ = h.NormalizedBoard() + _ = h.GetCPUModel() + _ = h.GetBogomips() +} + +func TestGetKernelVersion(t *testing.T) { + v := GetKernelVersion() + if v == "" { + t.Error("GetKernelVersion should not return empty string") + } +} + +func TestGetGoVersion(t *testing.T) { + v := GetGoVersion() + if v == "" { + t.Error("GetGoVersion should not return empty string") + } +} + +func TestGetGitCommit_EmptyDir(t *testing.T) { + commit := GetGitCommit("") + if commit != "" { + t.Errorf("GetGitCommit empty dir: got %q, want empty", commit) + } +} + +func TestIsProcessRunning_NonExistent(t *testing.T) { + if IsProcessRunning("nonexistent_process_12345") { + t.Error("nonexistent process should not be running") + } +} diff --git a/internal/perf/report.go b/internal/perf/report.go new file mode 100644 index 00000000..73fba20e --- /dev/null +++ b/internal/perf/report.go @@ -0,0 +1,442 @@ +package perf + +import ( + "bytes" + "encoding/csv" + "encoding/json" + "fmt" + "io" + "log" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + vegeta "github.com/tsenart/vegeta/v12/lib" +) + +// PerfMetrics holds the results of a performance test. +type PerfMetrics struct { + ClientName string + TestNumber int + Repetition int + QPS int + Duration int + MinLatency string + Mean string + P50 string + P90 string + P95 string + P99 string + MaxLatency string + SuccessRatio string + Error string + VegetaMetrics *vegeta.Metrics +} + +// JSONReport represents the structure of the JSON performance report. +type JSONReport struct { + Platform PlatformInfo `json:"platform"` + Configuration ConfigurationInfo `json:"configuration"` + Results []JSONTestResult `json:"results"` +} + +// PlatformInfo holds platform hardware and software information. 
+type PlatformInfo struct { + Vendor string `json:"vendor"` + Product string `json:"product"` + Board string `json:"board"` + CPU string `json:"cpu"` + Bogomips string `json:"bogomips"` + Kernel string `json:"kernel"` + GCCVersion string `json:"gccVersion"` + GoVersion string `json:"goVersion"` + ClientCommit string `json:"clientCommit"` +} + +// ConfigurationInfo holds test configuration information. +type ConfigurationInfo struct { + TestingClient string `json:"testingClient"` + TestingAPI string `json:"testingApi"` + TestSequence string `json:"testSequence"` + TestRepetitions int `json:"testRepetitions"` + VegetaFile string `json:"vegetaFile"` + VegetaChecksum string `json:"vegetaChecksum"` + Taskset string `json:"taskset"` +} + +// JSONTestResult holds results for a single QPS/duration test. +type JSONTestResult struct { + QPS int `json:"qps"` + Duration int `json:"duration"` + TestRepetitions []RepetitionInfo `json:"testRepetitions"` +} + +// RepetitionInfo holds information for a single test repetition. +type RepetitionInfo struct { + VegetaBinary string `json:"vegetaBinary"` + VegetaReport map[string]interface{} `json:"vegetaReport"` + VegetaReportHdrPlot string `json:"vegetaReportHdrPlot"` +} + +// TestReport manages CSV and JSON report generation. +type TestReport struct { + Config *Config + RunDirs *RunDirs + csvFile *os.File + csvWriter *csv.Writer + jsonReport *JSONReport + hardware *Hardware + currentTestIdx int +} + +// NewTestReport creates a new test report instance. +func NewTestReport(config *Config, dirs *RunDirs) *TestReport { + return &TestReport{ + Config: config, + RunDirs: dirs, + hardware: &Hardware{}, + currentTestIdx: -1, + } +} + +// Open initialises the test report and writes headers. 
+func (tr *TestReport) Open() error { + if err := tr.createCSVFile(); err != nil { + return fmt.Errorf("failed to create CSV file: %w", err) + } + + checksum := GetFileChecksum(tr.Config.VegetaPatternTarFile) + gccVersion := GetGCCVersion() + goVersion := GetGoVersion() + kernelVersion := GetKernelVersion() + cpuModel := tr.hardware.GetCPUModel() + bogomips := tr.hardware.GetBogomips() + + var clientCommit string + if tr.Config.ClientBuildDir != "" { + clientCommit = GetGitCommit(tr.Config.ClientBuildDir) + } else { + clientCommit = "none" + } + + if err := tr.writeTestHeader(cpuModel, bogomips, kernelVersion, checksum, + gccVersion, goVersion, clientCommit); err != nil { + return fmt.Errorf("failed to write test header: %w", err) + } + + if tr.Config.JSONReportFile != "" { + tr.initializeJSONReport(cpuModel, bogomips, kernelVersion, checksum, + gccVersion, goVersion, clientCommit) + } + + return nil +} + +// createCSVFile creates the CSV report file with appropriate naming. +func (tr *TestReport) createCSVFile() error { + extension := tr.hardware.NormalizedProduct() + if extension == "systemproductname" { + extension = tr.hardware.NormalizedBoard() + } + + csvFolder := tr.hardware.NormalizedVendor() + "_" + extension + var csvFolderPath string + if tr.Config.VersionedTestReport { + csvFolderPath = filepath.Join("./perf/reports", tr.Config.ChainName, csvFolder) + } else { + csvFolderPath = filepath.Join(tr.RunDirs.RunTestDir, tr.Config.ChainName, csvFolder) + } + + if err := os.MkdirAll(csvFolderPath, 0755); err != nil { + return fmt.Errorf("failed to create CSV folder: %w", err) + } + + timestamp := time.Now().Format("20060102150405") + var csvFilename string + if tr.Config.TestingClient != "" { + csvFilename = fmt.Sprintf("%s_%s_%s_perf.csv", + tr.Config.TestType, timestamp, tr.Config.TestingClient) + } else { + csvFilename = fmt.Sprintf("%s_%s_perf.csv", + tr.Config.TestType, timestamp) + } + + csvFilepath := filepath.Join(csvFolderPath, csvFilename) + + file, 
err := os.Create(csvFilepath) + if err != nil { + return fmt.Errorf("failed to create CSV file: %w", err) + } + + tr.csvFile = file + tr.csvWriter = csv.NewWriter(file) + + fmt.Printf("Perf report file: %s\n\n", csvFilepath) + + return nil +} + +// writeTestHeader writes the test configuration header to CSV. +func (tr *TestReport) writeTestHeader(cpuModel, bogomips, kernelVersion, checksum, gccVersion, goVersion, clientCommit string) error { + emptyRow := make([]string, 14) + + if err := tr.csvWriter.Write(append(emptyRow[:12], "vendor", tr.hardware.Vendor())); err != nil { + return err + } + + product := tr.hardware.Product() + if product != "System Product Name" { + if err := tr.csvWriter.Write(append(emptyRow[:12], "product", product)); err != nil { + return err + } + } else { + if err := tr.csvWriter.Write(append(emptyRow[:12], "board", tr.hardware.Board())); err != nil { + return err + } + } + + rows := [][2]string{ + {"cpu", cpuModel}, + {"bogomips", bogomips}, + {"kernel", kernelVersion}, + {"taskset", tr.Config.ClientVegetaOnCore}, + {"vegetaFile", tr.Config.VegetaPatternTarFile}, + {"vegetaChecksum", checksum}, + {"gccVersion", gccVersion}, + {"goVersion", goVersion}, + {"clientVersion", clientCommit}, + } + for _, r := range rows { + if err := tr.csvWriter.Write(append(emptyRow[:12], r[0], r[1])); err != nil { + return err + } + } + + for range 2 { + if err := tr.csvWriter.Write([]string{}); err != nil { + return err + } + } + + headers := []string{ + "ClientName", "TestNo", "Repetition", "Qps", "Time(secs)", + "Min", "Mean", "50", "90", "95", "99", "Max", "Ratio", "Error", + } + if err := tr.csvWriter.Write(headers); err != nil { + return err + } + tr.csvWriter.Flush() + + return tr.csvWriter.Error() +} + +// initializeJSONReport initializes the JSON report structure. 
+func (tr *TestReport) initializeJSONReport(cpuModel, bogomips, kernelVersion, checksum, + gccVersion, goVersion, clientCommit string) { + + tr.jsonReport = &JSONReport{ + Platform: PlatformInfo{ + Vendor: strings.TrimSpace(tr.hardware.Vendor()), + Product: strings.TrimSpace(tr.hardware.Product()), + Board: strings.TrimSpace(tr.hardware.Board()), + CPU: strings.TrimSpace(cpuModel), + Bogomips: strings.TrimSpace(bogomips), + Kernel: strings.TrimSpace(kernelVersion), + GCCVersion: strings.TrimSpace(gccVersion), + GoVersion: strings.TrimSpace(goVersion), + ClientCommit: strings.TrimSpace(clientCommit), + }, + Configuration: ConfigurationInfo{ + TestingClient: tr.Config.TestingClient, + TestingAPI: tr.Config.TestType, + TestSequence: tr.Config.TestSequence, + TestRepetitions: tr.Config.Repetitions, + VegetaFile: tr.Config.VegetaPatternTarFile, + VegetaChecksum: checksum, + Taskset: tr.Config.ClientVegetaOnCore, + }, + Results: []JSONTestResult{}, + } +} + +// WriteTestReport writes a test result to the report. +func (tr *TestReport) WriteTestReport(metrics *PerfMetrics) error { + row := []string{ + metrics.ClientName, + strconv.Itoa(metrics.TestNumber), + strconv.Itoa(metrics.Repetition), + strconv.Itoa(metrics.QPS), + strconv.Itoa(metrics.Duration), + metrics.MinLatency, + metrics.Mean, + metrics.P50, + metrics.P90, + metrics.P95, + metrics.P99, + metrics.MaxLatency, + metrics.SuccessRatio, + metrics.Error, + } + + if err := tr.csvWriter.Write(row); err != nil { + return fmt.Errorf("failed to write CSV row: %w", err) + } + tr.csvWriter.Flush() + + if tr.Config.JSONReportFile != "" { + if err := tr.writeTestReportToJSON(metrics); err != nil { + return fmt.Errorf("failed to write JSON report: %w", err) + } + } + + return nil +} + +// writeTestReportToJSON writes a test result to the JSON report. 
+func (tr *TestReport) writeTestReportToJSON(metrics *PerfMetrics) error { + if metrics.Repetition == 0 { + tr.currentTestIdx++ + tr.jsonReport.Results = append(tr.jsonReport.Results, JSONTestResult{ + QPS: metrics.QPS, + Duration: metrics.Duration, + TestRepetitions: []RepetitionInfo{}, + }) + } + + jsonReportData, err := generateJSONReport(tr.Config.BinaryFileFullPathname) + if err != nil { + return fmt.Errorf("failed to generate JSON report: %w", err) + } + + hdrPlot, err := generateHdrPlot(tr.Config.BinaryFileFullPathname) + if err != nil { + return fmt.Errorf("failed to generate HDR plot: %w", err) + } + + repetitionInfo := RepetitionInfo{ + VegetaBinary: tr.Config.BinaryFile, + VegetaReport: jsonReportData, + VegetaReportHdrPlot: hdrPlot, + } + + if tr.currentTestIdx >= 0 && tr.currentTestIdx < len(tr.jsonReport.Results) { + tr.jsonReport.Results[tr.currentTestIdx].TestRepetitions = append( + tr.jsonReport.Results[tr.currentTestIdx].TestRepetitions, + repetitionInfo, + ) + } + + return nil +} + +// generateJSONReport generates a JSON report from a vegeta binary file. 
+func generateJSONReport(binaryFile string) (map[string]interface{}, error) { + file, err := os.Open(binaryFile) + if err != nil { + return nil, err + } + defer file.Close() + + dec := vegeta.NewDecoder(file) + var metrics vegeta.Metrics + for { + var result vegeta.Result + if err := dec.Decode(&result); err != nil { + if err == io.EOF { + break + } + return nil, err + } + metrics.Add(&result) + } + metrics.Close() + + report := map[string]interface{}{ + "requests": metrics.Requests, + "duration": metrics.Duration.Seconds(), + "rate": metrics.Rate, + "throughput": metrics.Throughput, + "success": metrics.Success, + "latencies": map[string]interface{}{ + "min": metrics.Latencies.Min.Seconds(), + "mean": metrics.Latencies.Mean.Seconds(), + "p50": metrics.Latencies.P50.Seconds(), + "p90": metrics.Latencies.P90.Seconds(), + "p95": metrics.Latencies.P95.Seconds(), + "p99": metrics.Latencies.P99.Seconds(), + "max": metrics.Latencies.Max.Seconds(), + }, + "status_codes": metrics.StatusCodes, + "errors": metrics.Errors, + } + + return report, nil +} + +// generateHdrPlot generates HDR histogram plot data from a vegeta binary file. +func generateHdrPlot(binaryFile string) (string, error) { + file, err := os.Open(binaryFile) + if err != nil { + return "", err + } + defer file.Close() + + dec := vegeta.NewDecoder(file) + var metrics vegeta.Metrics + for { + var result vegeta.Result + if err := dec.Decode(&result); err != nil { + if err == io.EOF { + break + } + return "", err + } + metrics.Add(&result) + } + metrics.Close() + + var buf bytes.Buffer + histogram := metrics.Histogram + if histogram != nil { + for i, bucket := range histogram.Buckets { + if _, err := fmt.Fprintf(&buf, "%.6f %d\n", float64(bucket), histogram.Counts[i]); err != nil { + return "", err + } + } + } + + return buf.String(), nil +} + +// Close finalises and closes the test report. 
+func (tr *TestReport) Close() error { + if tr.csvWriter != nil { + tr.csvWriter.Flush() + if err := tr.csvWriter.Error(); err != nil { + log.Printf("CSV writer error: %v", err) + } + } + + if tr.csvFile != nil { + if err := tr.csvFile.Close(); err != nil { + return fmt.Errorf("failed to close CSV file: %w", err) + } + } + + if tr.Config.JSONReportFile != "" && tr.jsonReport != nil { + fmt.Printf("Create json file: %s\n", tr.Config.JSONReportFile) + + jsonData, err := json.MarshalIndent(tr.jsonReport, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal JSON report: %w", err) + } + + if err := os.WriteFile(tr.Config.JSONReportFile, jsonData, 0644); err != nil { + return fmt.Errorf("failed to write JSON report: %w", err) + } + } + + return nil +} diff --git a/internal/perf/sequence.go b/internal/perf/sequence.go new file mode 100644 index 00000000..926434b6 --- /dev/null +++ b/internal/perf/sequence.go @@ -0,0 +1,99 @@ +package perf + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// TestSequenceItem represents a single test in the sequence. +type TestSequenceItem struct { + QPS int + Duration int +} + +// TestSequence is a list of test sequence items. +type TestSequence []TestSequenceItem + +// ParseTestSequence parses the test sequence string "QPS:Duration,..." into structured items. 
+func ParseTestSequence(sequence string) (TestSequence, error) { + var items TestSequence + + parts := strings.Split(sequence, ",") + for _, part := range parts { + qpsDur := strings.Split(part, ":") + if len(qpsDur) != 2 { + return nil, fmt.Errorf("invalid test sequence format: %s", part) + } + + qps, err := strconv.Atoi(qpsDur[0]) + if err != nil { + return nil, fmt.Errorf("invalid QPS value: %s", qpsDur[0]) + } + + duration, err := strconv.Atoi(qpsDur[1]) + if err != nil { + return nil, fmt.Errorf("invalid duration value: %s", qpsDur[1]) + } + + items = append(items, TestSequenceItem{ + QPS: qps, + Duration: duration, + }) + } + + return items, nil +} + +// ResultFormat holds formatting widths for console output alignment. +type ResultFormat struct { + MaxRepetitionDigits int + MaxQpsDigits int + MaxDurationDigits int +} + +// CountDigits returns the number of decimal digits in n. +func CountDigits(n int) int { + if n == 0 { + return 1 + } + digits := 0 + for n != 0 { + n /= 10 + digits++ + } + return digits +} + +// MaxQpsAndDurationDigits computes the max digit widths across a sequence. +func MaxQpsAndDurationDigits(sequence TestSequence) (maxQpsDigits, maxDurationDigits int) { + for _, item := range sequence { + qpsDigits := CountDigits(item.QPS) + if qpsDigits > maxQpsDigits { + maxQpsDigits = qpsDigits + } + durationDigits := CountDigits(item.Duration) + if durationDigits > maxDurationDigits { + maxDurationDigits = durationDigits + } + } + return +} + +// FormatDuration formats a duration string with appropriate units. +func FormatDuration(d time.Duration) string { + if d < time.Millisecond { + return fmt.Sprintf("%.0fµs", float64(d.Microseconds())) + } + if d < time.Second { + return fmt.Sprintf("%.2fms", float64(d.Microseconds())/1000.0) + } + return fmt.Sprintf("%.2fs", d.Seconds()) +} + +// ParseLatency parses a latency string and returns it in a consistent format. 
+func ParseLatency(latency string) string { + latency = strings.ReplaceAll(latency, "µs", "us") + return strings.TrimSpace(latency) +} diff --git a/internal/perf/vegeta.go b/internal/perf/vegeta.go new file mode 100644 index 00000000..74117881 --- /dev/null +++ b/internal/perf/vegeta.go @@ -0,0 +1,588 @@ +package perf + +import ( + "archive/tar" + "bufio" + "compress/bzip2" + "compress/gzip" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + vegeta "github.com/tsenart/vegeta/v12/lib" +) + +// VegetaTarget represents a single HTTP request target for Vegeta. +type VegetaTarget struct { + Method string `json:"method"` + URL string `json:"url"` + Body []byte `json:"body,omitempty"` + Header map[string][]string `json:"header,omitempty"` +} + +// PerfTest manages performance test execution. +type PerfTest struct { + Config *Config + Report *TestReport + RunDirs *RunDirs +} + +// NewPerfTest creates a new performance test instance. +func NewPerfTest(config *Config, report *TestReport, dirs *RunDirs) (*PerfTest, error) { + pt := &PerfTest{ + Config: config, + Report: report, + RunDirs: dirs, + } + + if err := pt.Cleanup(true); err != nil { + return nil, fmt.Errorf("initial cleanup failed: %w", err) + } + + if err := pt.CopyAndExtractPatternFile(); err != nil { + return nil, fmt.Errorf("failed to setup pattern file: %w", err) + } + + return pt, nil +} + +// Cleanup removes temporary files. 
+func (pt *PerfTest) Cleanup(initial bool) error { + filesToRemove := []string{ + pt.RunDirs.TarFileName, + "perf.data.old", + "perf.data", + } + + for _, fileName := range filesToRemove { + _, err := os.Stat(fileName) + if errors.Is(err, os.ErrNotExist) { + continue + } + if err := os.Remove(fileName); err != nil { + return err + } + } + + if err := os.RemoveAll(pt.RunDirs.PatternDir); err != nil { + return err + } + + if initial { + if err := os.RemoveAll(pt.RunDirs.RunTestDir); err != nil { + return err + } + } else { + _ = os.Remove(pt.RunDirs.RunTestDir) + } + + return nil +} + +// CopyAndExtractPatternFile copies and extracts the vegeta pattern tar file. +func (pt *PerfTest) CopyAndExtractPatternFile() error { + if _, err := os.Stat(pt.Config.VegetaPatternTarFile); os.IsNotExist(err) { + return fmt.Errorf("invalid pattern file: %s", pt.Config.VegetaPatternTarFile) + } + + if err := os.MkdirAll(pt.RunDirs.RunTestDir, 0755); err != nil { + return fmt.Errorf("failed to create temp directory: %w", err) + } + + if err := copyFile(pt.Config.VegetaPatternTarFile, pt.RunDirs.TarFileName); err != nil { + return fmt.Errorf("failed to copy pattern file: %w", err) + } + + if pt.Config.Tracing { + fmt.Printf("Copy Vegeta pattern: %s -> %s\n", pt.Config.VegetaPatternTarFile, pt.RunDirs.TarFileName) + } + + if err := extractTarGz(pt.RunDirs.TarFileName, pt.RunDirs.RunTestDir); err != nil { + return fmt.Errorf("failed to extract pattern file: %w", err) + } + + if pt.Config.Tracing { + fmt.Printf("Extracting Vegeta pattern to: %s\n", pt.RunDirs.RunTestDir) + } + + if pt.Config.ClientAddress != "localhost" { + patternFile := pt.RunDirs.PatternBase + pt.Config.TestType + ".txt" + if err := replaceInFile(patternFile, "localhost", pt.Config.ClientAddress); err != nil { + log.Printf("Warning: failed to replace address in pattern: %v", err) + } + } + + return nil +} + +// Execute runs a single performance test. 
+func (pt *PerfTest) Execute(ctx context.Context, testNumber, repetition int, name string, qps, duration int, format ResultFormat) error { + if pt.Config.EmptyCache { + if err := EmptyOSCache(); err != nil { + log.Printf("Warning: failed to empty cache: %v", err) + } + } + + pattern := pt.RunDirs.PatternBase + pt.Config.TestType + ".txt" + + timestamp := time.Now().Format("20060102150405") + pt.Config.BinaryFile = fmt.Sprintf("%s_%s_%s_%s_%d_%d_%d.bin", + timestamp, + pt.Config.ChainName, + pt.Config.TestingClient, + pt.Config.TestType, + qps, + duration, + repetition+1) + + var dirname string + if pt.Config.VersionedTestReport { + dirname = "./perf/reports/" + BinaryDir + "/" + } else { + dirname = pt.RunDirs.RunTestDir + "/" + BinaryDir + "/" + } + + if err := os.MkdirAll(dirname, 0755); err != nil { + return fmt.Errorf("failed to create binary directory: %w", err) + } + + pt.Config.BinaryFileFullPathname = dirname + pt.Config.BinaryFile + + maxRepDigits := strconv.Itoa(format.MaxRepetitionDigits) + maxQpsDigits := strconv.Itoa(format.MaxQpsDigits) + maxDurDigits := strconv.Itoa(format.MaxDurationDigits) + fmt.Printf("[%d.%"+maxRepDigits+"d] %s: executes test qps: %"+maxQpsDigits+"d time: %"+maxDurDigits+"d -> ", + testNumber, repetition+1, pt.Config.TestingClient, qps, duration) + + targets, err := pt.loadTargets(pattern) + if err != nil { + return fmt.Errorf("failed to load targets: %w", err) + } + + metrics, err := pt.runVegetaAttack(ctx, targets, qps, time.Duration(duration)*time.Second, pt.Config.BinaryFileFullPathname) + if err != nil { + return fmt.Errorf("vegeta attack failed: %w", err) + } + + if pt.Config.CheckServerAlive { + if !IsProcessRunning(pt.Config.TestingClient) { + fmt.Println("test failed: server is Dead") + return fmt.Errorf("server died during test") + } + } + + return pt.processResults(testNumber, repetition, name, qps, duration, metrics) +} + +// ExecuteSequence executes a sequence of performance tests. 
+func (pt *PerfTest) ExecuteSequence(ctx context.Context, sequence TestSequence, tag string) error { + testNumber := 1 + + pattern := pt.RunDirs.PatternBase + pt.Config.TestType + ".txt" + + if file, err := os.Open(pattern); err == nil { + scanner := bufio.NewScanner(file) + if scanner.Scan() { + var vt VegetaTarget + if json.Unmarshal([]byte(scanner.Text()), &vt) == nil { + fmt.Printf("Test on port: %s\n", vt.URL) + } + } + file.Close() + } + + maxQpsDigits, maxDurationDigits := MaxQpsAndDurationDigits(sequence) + resultFormat := ResultFormat{ + MaxRepetitionDigits: CountDigits(pt.Config.Repetitions), + MaxQpsDigits: maxQpsDigits, + MaxDurationDigits: maxDurationDigits, + } + + for _, test := range sequence { + for rep := 0; rep < pt.Config.Repetitions; rep++ { + if test.QPS > 0 { + if err := pt.Execute(ctx, testNumber, rep, tag, test.QPS, test.Duration, resultFormat); err != nil { + return err + } + } else { + time.Sleep(time.Duration(test.Duration) * time.Second) + } + + time.Sleep(time.Duration(pt.Config.WaitingTime) * time.Second) + } + testNumber++ + fmt.Println() + } + + return nil +} + +// loadTargets loads Vegeta targets from a pattern file. 
+func (pt *PerfTest) loadTargets(filepath string) ([]vegeta.Target, error) { + file, err := os.Open(filepath) + if err != nil { + return nil, err + } + defer file.Close() + + const maxCapacity = 1024 * 1024 + var targets []vegeta.Target + scanner := bufio.NewScanner(file) + buffer := make([]byte, 0, maxCapacity) + scanner.Buffer(buffer, maxCapacity) + + for scanner.Scan() { + line := scanner.Text() + if line == "" { + continue + } + + var vt VegetaTarget + if err := json.Unmarshal([]byte(line), &vt); err != nil { + return nil, fmt.Errorf("failed to parse target: %w", err) + } + + target := vegeta.Target{ + Method: vt.Method, + URL: vt.URL, + Body: vt.Body, + Header: make(http.Header), + } + + for k, v := range vt.Header { + for _, vv := range v { + target.Header.Set(k, vv) + } + } + + targets = append(targets, target) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + if len(targets) == 0 { + return nil, fmt.Errorf("no targets found in pattern file") + } + + return targets, nil +} + +// runVegetaAttack executes a Vegeta attack using the library. +func (pt *PerfTest) runVegetaAttack(ctx context.Context, targets []vegeta.Target, qps int, duration time.Duration, outputFile string) (*vegeta.Metrics, error) { + rate := vegeta.Rate{Freq: qps, Per: time.Second} + targeter := vegeta.NewStaticTargeter(targets...) 
+ + timeout, _ := time.ParseDuration(pt.Config.VegetaResponseTimeout) + maxConnInt, _ := strconv.Atoi(pt.Config.MaxConnection) + maxBodyInt, _ := strconv.Atoi(pt.Config.MaxBodyRsp) + + tr := &http.Transport{ + DisableCompression: pt.Config.DisableHttpCompression, + Proxy: http.ProxyFromEnvironment, + } + + customClient := &http.Client{ + Transport: tr, + } + + attacker := vegeta.NewAttacker( + vegeta.Client(customClient), + vegeta.Timeout(timeout), + vegeta.Workers(uint64(maxConnInt)), + vegeta.MaxBody(int64(maxBodyInt)), + vegeta.KeepAlive(true), + ) + + out, err := os.Create(outputFile) + if err != nil { + return nil, fmt.Errorf("failed to create output file: %w", err) + } + defer out.Close() + + encoder := vegeta.NewEncoder(out) + + var metrics vegeta.Metrics + resultCh := attacker.Attack(targeter, rate, duration, "vegeta-attack") + for { + select { + case result := <-resultCh: + if result == nil { + metrics.Close() + return &metrics, nil + } + metrics.Add(result) + if err := encoder.Encode(result); err != nil { + log.Printf("Warning: failed to encode result: %v", err) + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +// processResults processes the vegeta metrics and generates reports. 
+func (pt *PerfTest) processResults(testNumber, repetition int, name string, qps, duration int, metrics *vegeta.Metrics) error { + minLatency := FormatDuration(metrics.Latencies.Min) + mean := FormatDuration(metrics.Latencies.Mean) + p50 := FormatDuration(metrics.Latencies.P50) + p90 := FormatDuration(metrics.Latencies.P90) + p95 := FormatDuration(metrics.Latencies.P95) + p99 := FormatDuration(metrics.Latencies.P99) + maxLatency := FormatDuration(metrics.Latencies.Max) + + successRatio := fmt.Sprintf("%.2f%%", metrics.Success*100) + + errorMsg := "" + if len(metrics.Errors) > 0 { + errorMap := make(map[string]int) + for _, err := range metrics.Errors { + errorMap[err]++ + } + + const MaxErrorsToDisplay = 1 + errorsToDisplay := 0 + for errStr, count := range errorMap { + if errorsToDisplay >= MaxErrorsToDisplay { + break + } + if errorMsg != "" { + errorMsg += "; " + } + errorMsg += fmt.Sprintf("%s (x%d)", errStr, count) + errorsToDisplay++ + } + if errorsToDisplay < len(errorMap) { + errorMsg += fmt.Sprintf(" (+%d more)", len(errorMap)-errorsToDisplay) + } + } + + var resultRecord string + if pt.Config.MorePercentiles { + resultRecord = fmt.Sprintf("success=%7s lat=[p50=%8s p90=%8s p95=%8s p99=%8s max=%8s]", + successRatio, p50, p90, p95, p99, maxLatency) + } else { + resultRecord = fmt.Sprintf("success=%7s lat=[max=%8s]", successRatio, maxLatency) + } + if errorMsg != "" { + resultRecord += fmt.Sprintf(" error=%s", errorMsg) + } + fmt.Println(resultRecord) + + if errorMsg != "" && pt.Config.HaltOnVegetaError { + return fmt.Errorf("test failed: %s", errorMsg) + } + + if successRatio != "100.00%" { + return fmt.Errorf("test failed: ratio is not 100.00%%") + } + + if pt.Config.CreateTestReport { + testMetrics := &PerfMetrics{ + ClientName: name, + TestNumber: testNumber, + Repetition: repetition, + QPS: qps, + Duration: duration, + MinLatency: minLatency, + Mean: mean, + P50: p50, + P90: p90, + P95: p95, + P99: p99, + MaxLatency: maxLatency, + SuccessRatio: 
successRatio, + Error: errorMsg, + VegetaMetrics: metrics, + } + + if err := pt.Report.WriteTestReport(testMetrics); err != nil { + return fmt.Errorf("failed to write test report: %w", err) + } + } + + if pt.Config.InstantReport { + printInstantReport(metrics) + } + + return nil +} + +// printInstantReport prints detailed metrics to the console. +func printInstantReport(metrics *vegeta.Metrics) { + fmt.Println("\n=== Detailed Metrics ===") + fmt.Printf("Requests: %d\n", metrics.Requests) + fmt.Printf("Duration: %v\n", metrics.Duration) + fmt.Printf("Rate: %.2f req/s\n", metrics.Rate) + fmt.Printf("Throughput: %.2f req/s\n", metrics.Throughput) + fmt.Printf("Success: %.2f%%\n", metrics.Success*100) + + fmt.Println("\nLatencies:") + fmt.Printf(" Min: %v\n", metrics.Latencies.Min) + fmt.Printf(" Mean: %v\n", metrics.Latencies.Mean) + fmt.Printf(" P50: %v\n", metrics.Latencies.P50) + fmt.Printf(" P90: %v\n", metrics.Latencies.P90) + fmt.Printf(" P95: %v\n", metrics.Latencies.P95) + fmt.Printf(" P99: %v\n", metrics.Latencies.P99) + fmt.Printf(" Max: %v\n", metrics.Latencies.Max) + + fmt.Println("\nStatus Codes:") + for code, count := range metrics.StatusCodes { + fmt.Printf(" %s: %d\n", code, count) + } + + if len(metrics.Errors) > 0 { + fmt.Println("\nErrors:") + errorMap := make(map[string]int) + for _, err := range metrics.Errors { + errorMap[err]++ + } + for errStr, count := range errorMap { + fmt.Printf(" %s: %d\n", errStr, count) + } + } + + fmt.Print("========================\n\n") +} + +// Compression type constants. 
+const ( + GzipCompression = ".gz" + Bzip2Compression = ".bz2" + NoCompression = "" +) + +func getCompressionType(filename string) string { + if strings.HasSuffix(filename, ".tar.gz") || strings.HasSuffix(filename, ".tgz") { + return GzipCompression + } + if strings.HasSuffix(filename, ".tar.bz2") || strings.HasSuffix(filename, ".tbz") { + return Bzip2Compression + } + return NoCompression +} + +func autodetectCompression(inFile *os.File) (string, error) { + compressionType := NoCompression + tarReader := tar.NewReader(inFile) + _, err := tarReader.Next() + if err != nil && !errors.Is(err, io.EOF) { + if _, err = inFile.Seek(0, io.SeekStart); err != nil { + return compressionType, err + } + if _, err = gzip.NewReader(inFile); err == nil { + compressionType = GzipCompression + } else { + if _, err = inFile.Seek(0, io.SeekStart); err != nil { + return compressionType, err + } + if _, err = tar.NewReader(bzip2.NewReader(inFile)).Next(); err == nil { + compressionType = Bzip2Compression + } + } + } + return compressionType, nil +} + +func extractTarGz(tarFile, destDir string) error { + file, err := os.Open(tarFile) + if err != nil { + return fmt.Errorf("failed to open archive: %w", err) + } + defer file.Close() + + compressionType := getCompressionType(tarFile) + if compressionType == NoCompression { + compressionType, err = autodetectCompression(file) + if err != nil { + return fmt.Errorf("failed to autodetect compression for archive: %w", err) + } + file.Close() + file, err = os.Open(tarFile) + if err != nil { + return err + } + defer file.Close() + } + + var reader io.Reader + switch compressionType { + case GzipCompression: + if reader, err = gzip.NewReader(file); err != nil { + return fmt.Errorf("failed to create gzip reader: %w", err) + } + case Bzip2Compression: + reader = bzip2.NewReader(file) + case NoCompression: + reader = file + } + + tr := tar.NewReader(reader) + + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return 
err + } + + target := filepath.Join(destDir, header.Name) + + switch header.Typeflag { + case tar.TypeDir: + if err := os.MkdirAll(target, 0755); err != nil { + return err + } + case tar.TypeReg: + outFile, err := os.Create(target) + if err != nil { + return err + } + if _, err := io.Copy(outFile, tr); err != nil { + outFile.Close() + return err + } + outFile.Close() + } + } + + return nil +} + +func copyFile(src, dst string) error { + sourceFile, err := os.Open(src) + if err != nil { + return err + } + defer sourceFile.Close() + + destFile, err := os.Create(dst) + if err != nil { + return err + } + defer destFile.Close() + + _, err = io.Copy(destFile, sourceFile) + return err +} + +func replaceInFile(filepath, old, new string) error { + input, err := os.ReadFile(filepath) + if err != nil { + return err + } + output := strings.ReplaceAll(string(input), old, new) + return os.WriteFile(filepath, []byte(output), 0644) +} diff --git a/internal/rpc/client.go b/internal/rpc/client.go new file mode 100644 index 00000000..450c171f --- /dev/null +++ b/internal/rpc/client.go @@ -0,0 +1,43 @@ +package rpc + +import ( + "context" + "fmt" + "strings" + "time" +) + +// Metrics tracks timing statistics for a single RPC call. +type Metrics struct { + RoundTripTime time.Duration + MarshallingTime time.Duration + UnmarshallingTime time.Duration +} + +// Client dispatches JSON-RPC requests over HTTP or WebSocket transports. +type Client struct { + verbose int + transport string + jwtAuth string +} + +// NewClient creates a new RPC client for the given transport type. +func NewClient(transport string, jwtAuth string, verbose int) *Client { + return &Client{ + verbose: verbose, + transport: transport, + jwtAuth: jwtAuth, + } +} + +// Call sends a JSON-RPC request and decodes the response into the provided target. +// Returns timing metrics and any error encountered. 
+func (c *Client) Call(ctx context.Context, target string, request []byte, response any) (Metrics, error) { + if strings.HasPrefix(c.transport, "http") { + return c.callHTTP(ctx, target, request, response) + } + if strings.HasPrefix(c.transport, "websocket") { + return c.callWebSocket(target, request, response) + } + return Metrics{}, fmt.Errorf("unsupported transport: %s", c.transport) +} diff --git a/internal/rpc/client_bench_test.go b/internal/rpc/client_bench_test.go new file mode 100644 index 00000000..6929db8d --- /dev/null +++ b/internal/rpc/client_bench_test.go @@ -0,0 +1,39 @@ +package rpc + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" +) + +func BenchmarkCallHTTP(b *testing.B) { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"jsonrpc":"2.0","id":1,"result":"0x1"}`)) + }) + server := httptest.NewServer(handler) + defer server.Close() + + client := NewClient("http", "", 0) + request := []byte(`{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}`) + ctx := context.Background() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + var result interface{} + client.Call(ctx, server.URL, request, &result) + } +} + +func BenchmarkValidateJsonRpcResponse(b *testing.B) { + response := map[string]interface{}{ + "jsonrpc": "2.0", + "id": float64(1), + "result": "0x1", + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + ValidateJsonRpcResponse(response) + } +} diff --git a/internal/rpc/client_test.go b/internal/rpc/client_test.go new file mode 100644 index 00000000..f69e118e --- /dev/null +++ b/internal/rpc/client_test.go @@ -0,0 +1,255 @@ +package rpc + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + json "encoding/json" +) + +func TestNewClient(t *testing.T) { + c := NewClient("http", "Bearer token", 1) + if c.transport != "http" { + t.Errorf("transport: got %q, want %q", c.transport, 
"http") + } + if c.jwtAuth != "Bearer token" { + t.Errorf("jwtAuth: got %q, want %q", c.jwtAuth, "Bearer token") + } + if c.verbose != 1 { + t.Errorf("verbose: got %d, want 1", c.verbose) + } +} + +func TestCallHTTP_Success(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Errorf("method: got %q, want POST", r.Method) + } + if ct := r.Header.Get("Content-Type"); ct != "application/json" { + t.Errorf("Content-Type: got %q", ct) + } + if ae := r.Header.Get("Accept-Encoding"); ae != "Identity" { + t.Errorf("Accept-Encoding: got %q, want Identity", ae) + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]interface{}{ + "jsonrpc": "2.0", + "id": 1, + "result": "0x1", + }) + })) + defer server.Close() + + // Strip http:// prefix since the client adds it + target := strings.TrimPrefix(server.URL, "http://") + client := NewClient("http", "", 0) + + var response any + metrics, err := client.Call(context.Background(), target, []byte(`{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}`), &response) + if err != nil { + t.Fatalf("Call: %v", err) + } + + if metrics.RoundTripTime == 0 { + t.Error("RoundTripTime should be > 0") + } + if metrics.UnmarshallingTime == 0 { + t.Error("UnmarshallingTime should be > 0") + } + + respMap, ok := response.(map[string]interface{}) + if !ok { + t.Fatal("response is not a map") + } + if respMap["result"] != "0x1" { + t.Errorf("result: got %v", respMap["result"]) + } +} + +func TestCallHTTP_JWTHeader(t *testing.T) { + var gotAuth string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gotAuth = r.Header.Get("Authorization") + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]interface{}{ + "jsonrpc": "2.0", + "id": 1, + "result": nil, + }) + })) + defer server.Close() + + target := strings.TrimPrefix(server.URL, 
"http://") + client := NewClient("http", "Bearer mytoken", 0) + + var response any + _, err := client.Call(context.Background(), target, []byte(`{}`), &response) + if err != nil { + t.Fatalf("Call: %v", err) + } + + if gotAuth != "Bearer mytoken" { + t.Errorf("Authorization: got %q, want %q", gotAuth, "Bearer mytoken") + } +} + +func TestCallHTTP_Compression(t *testing.T) { + var gotAcceptEncoding string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gotAcceptEncoding = r.Header.Get("Accept-Encoding") + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]interface{}{ + "jsonrpc": "2.0", + "id": 1, + "result": nil, + }) + })) + defer server.Close() + + target := strings.TrimPrefix(server.URL, "http://") + + // http_comp should NOT set Accept-Encoding: Identity + client := NewClient("http_comp", "", 0) + var response any + _, err := client.Call(context.Background(), target, []byte(`{}`), &response) + if err != nil { + t.Fatalf("Call: %v", err) + } + + if gotAcceptEncoding == "Identity" { + t.Error("http_comp should not set Accept-Encoding: Identity") + } +} + +func TestCallHTTP_ServerError(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer server.Close() + + target := strings.TrimPrefix(server.URL, "http://") + client := NewClient("http", "", 0) + + var response any + _, err := client.Call(context.Background(), target, []byte(`{}`), &response) + if err == nil { + t.Error("expected error for 500 response") + } +} + +func TestCallHTTP_ConnectionRefused(t *testing.T) { + client := NewClient("http", "", 0) + var response any + _, err := client.Call(context.Background(), "localhost:1", []byte(`{}`), &response) + if err == nil { + t.Error("expected error for connection refused") + } +} + +func TestCallHTTP_UnsupportedTransport(t *testing.T) { + client := 
NewClient("grpc", "", 0) + var response any + _, err := client.Call(context.Background(), "localhost:1", []byte(`{}`), &response) + if err == nil { + t.Error("expected error for unsupported transport") + } +} + +func TestValidateJsonRpcResponse_Valid(t *testing.T) { + resp := map[string]any{ + "jsonrpc": "2.0", + "id": float64(1), + "result": "0x1", + } + if err := ValidateJsonRpcResponse(resp); err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func TestValidateJsonRpcResponse_MissingJsonrpc(t *testing.T) { + resp := map[string]any{ + "id": float64(1), + "result": "0x1", + } + if err := ValidateJsonRpcResponse(resp); err == nil { + t.Error("expected error for missing jsonrpc") + } +} + +func TestValidateJsonRpcResponse_MissingId(t *testing.T) { + resp := map[string]any{ + "jsonrpc": "2.0", + "result": "0x1", + } + if err := ValidateJsonRpcResponse(resp); err == nil { + t.Error("expected error for missing id") + } +} + +func TestValidateJsonRpcResponse_BatchValid(t *testing.T) { + resp := []any{ + map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"}, + map[string]any{"jsonrpc": "2.0", "id": float64(2), "result": "0x2"}, + } + if err := ValidateJsonRpcResponse(resp); err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func TestParseHexUint64(t *testing.T) { + tests := []struct { + input string + want uint64 + err bool + }{ + {"0", 0, false}, + {"1", 1, false}, + {"a", 10, false}, + {"ff", 255, false}, + {"100", 256, false}, + {"12ab34", 0x12ab34, false}, + {"DEADBEEF", 0xDEADBEEF, false}, + {"xyz", 0, true}, + } + + for _, tt := range tests { + got, err := parseHexUint64(tt.input) + if (err != nil) != tt.err { + t.Errorf("parseHexUint64(%q): error = %v, wantErr %v", tt.input, err, tt.err) + } + if !tt.err && got != tt.want { + t.Errorf("parseHexUint64(%q): got %d, want %d", tt.input, got, tt.want) + } + } +} + +func TestCallHTTPRaw_Success(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"jsonrpc":"2.0","id":1,"result":"0x1"}`)) + })) + defer server.Close() + + target := strings.TrimPrefix(server.URL, "http://") + + var gotRTT bool + err := CallHTTPRaw(context.Background(), 0, "http", "", target, []byte(`{}`), func(resp *http.Response, err error, rtt time.Duration) error { + gotRTT = rtt > 0 + if err != nil { + return err + } + defer resp.Body.Close() + return nil + }) + if err != nil { + t.Fatalf("CallHTTPRaw: %v", err) + } + if !gotRTT { + t.Error("expected positive RTT") + } +} diff --git a/internal/rpc/http.go b/internal/rpc/http.go new file mode 100644 index 00000000..754b1b2b --- /dev/null +++ b/internal/rpc/http.go @@ -0,0 +1,273 @@ +package rpc + +import ( + "bytes" + "context" + "fmt" + "net/http" + "strings" + "sync" + "time" + + jsoniter "github.com/json-iterator/go" +) + +var jsonAPI = jsoniter.ConfigCompatibleWithStandardLibrary + +// sharedTransport is a single http.Transport shared across all goroutines. +// One transport = one connection pool = maximum TCP reuse across all workers. +var sharedTransport = &http.Transport{ + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, +} + +// sharedHTTPClient is a goroutine-safe http.Client using the shared transport. +var sharedHTTPClient = &http.Client{ + Timeout: 300 * time.Second, + Transport: sharedTransport, +} + +// bufPool reuses bytes.Buffer instances for request bodies. 
+var bufPool = sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, +} + +func (c *Client) callHTTP(ctx context.Context, target string, request []byte, response any) (Metrics, error) { + var metrics Metrics + + protocol := "http://" + if c.transport == "https" { + protocol = "https://" + } + url := protocol + target + + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + buf.Write(request) + defer bufPool.Put(buf) + + req, err := http.NewRequestWithContext(ctx, "POST", url, buf) + if err != nil { + if c.verbose > 0 { + fmt.Printf("\nhttp request creation fail: %s %v\n", url, err) + } + return metrics, err + } + + req.Header.Set("Content-Type", "application/json") + if !strings.HasSuffix(c.transport, "_comp") { + req.Header.Set("Accept-Encoding", "Identity") + } + if c.jwtAuth != "" { + req.Header.Set("Authorization", c.jwtAuth) + } + + start := time.Now() + resp, err := sharedHTTPClient.Do(req) + metrics.RoundTripTime = time.Since(start) + + if c.verbose > 1 { + fmt.Printf("http round-trip time: %v\n", metrics.RoundTripTime) + } + + if err != nil { + if c.verbose > 0 { + fmt.Printf("\nhttp connection fail: %s %v\n", target, err) + } + return metrics, err + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + fmt.Printf("\nfailed to close response body: %v\n", cerr) + } + }() + + if resp.StatusCode != http.StatusOK { + if c.verbose > 1 { + fmt.Printf("\npost result status_code: %d\n", resp.StatusCode) + } + return metrics, fmt.Errorf("http status %v", resp.Status) + } + + unmarshalStart := time.Now() + if err = jsonAPI.NewDecoder(resp.Body).Decode(response); err != nil { + return metrics, fmt.Errorf("cannot decode http body as json %w", err) + } + metrics.UnmarshallingTime = time.Since(unmarshalStart) + + if c.verbose > 1 { + raw, _ := jsonAPI.Marshal(response) + fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, request, string(raw)) + } + + return metrics, nil +} + +// CallHTTPRaw sends a raw HTTP POST and invokes the provided 
handler with the response. +// This matches the v1 rpc.HttpPost signature for backward compatibility. +func CallHTTPRaw(ctx context.Context, verbose int, transport, jwtAuth, target string, request []byte, handler func(*http.Response, error, time.Duration) error) error { + headers := map[string]string{ + "Content-Type": "application/json", + } + if transport != "http_comp" { + headers["Accept-Encoding"] = "Identity" + } + if jwtAuth != "" { + headers["Authorization"] = jwtAuth + } + + protocol := "http://" + if transport == "https" { + protocol = "https://" + } + url := protocol + target + + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(request)) + if err != nil { + if verbose > 0 { + fmt.Printf("\nhttp request creation fail: %s %v\n", url, err) + } + return err + } + for k, v := range headers { + req.Header.Set(k, v) + } + + start := time.Now() + resp, err := sharedHTTPClient.Do(req) + elapsed := time.Since(start) + + return handler(resp, err, elapsed) +} + +// ValidateJsonRpcResponse checks that a response is valid JSON-RPC 2.0. 
func ValidateJsonRpcResponse(response any) error {
	switch r := response.(type) {
	case map[string]any:
		return validateJsonRpcResponseObject(r)
	case *map[string]any:
		if r == nil {
			return fmt.Errorf("nil response pointer")
		}
		return validateJsonRpcResponseObject(*r)
	case []any:
		// Batch response: validate every element that is a JSON object;
		// non-object elements are skipped.
		for _, elem := range r {
			obj, isObject := elem.(map[string]any)
			if !isObject {
				continue
			}
			if err := validateJsonRpcResponseObject(obj); err != nil {
				return err
			}
		}
		return nil
	default:
		// Other payload shapes (io.ReadCloser, raw bytes, ...) skip validation.
		return nil
	}
}

// validateJsonRpcResponseObject checks a single decoded response object for
// the mandatory JSON-RPC 2.0 envelope fields.
func validateJsonRpcResponseObject(obj map[string]any) error {
	versionField, present := obj["jsonrpc"]
	if !present {
		return fmt.Errorf("invalid JSON-RPC response: missing 'jsonrpc' field")
	}
	if version, isString := versionField.(string); !isString || version != "2.0" {
		return fmt.Errorf("noncompliant JSON-RPC 2.0 version")
	}
	if _, present := obj["id"]; !present {
		return fmt.Errorf("invalid JSON-RPC response: missing 'id' field")
	}
	return nil
}

// GetLatestBlockNumber queries eth_blockNumber and returns the result as uint64.
+func GetLatestBlockNumber(ctx context.Context, client *Client, url string) (uint64, Metrics, error) { + type rpcReq struct { + Jsonrpc string `json:"jsonrpc"` + Method string `json:"method"` + Params []interface{} `json:"params"` + Id int `json:"id"` + } + + reqBytes, _ := jsonAPI.Marshal(rpcReq{ + Jsonrpc: "2.0", + Method: "eth_blockNumber", + Params: []interface{}{}, + Id: 1, + }) + + var response any + metrics, err := client.Call(ctx, url, reqBytes, &response) + if err != nil { + return 0, metrics, err + } + + responseMap, ok := response.(map[string]interface{}) + if !ok { + return 0, metrics, fmt.Errorf("response is not a map: %v", response) + } + if resultVal, hasResult := responseMap["result"]; hasResult { + resultStr, isString := resultVal.(string) + if !isString { + return 0, metrics, fmt.Errorf("result is not a string: %v", resultVal) + } + cleanHex := strings.TrimPrefix(resultStr, "0x") + val, err := parseHexUint64(cleanHex) + return val, metrics, err + } + if errorVal, hasError := responseMap["error"]; hasError { + return 0, metrics, fmt.Errorf("RPC error: %v", errorVal) + } + return 0, metrics, fmt.Errorf("no result or error found in response") +} + +func parseHexUint64(s string) (uint64, error) { + var result uint64 + for _, c := range s { + result <<= 4 + switch { + case c >= '0' && c <= '9': + result |= uint64(c - '0') + case c >= 'a' && c <= 'f': + result |= uint64(c - 'a' + 10) + case c >= 'A' && c <= 'F': + result |= uint64(c - 'A' + 10) + default: + return 0, fmt.Errorf("invalid hex character: %c", c) + } + } + return result, nil +} + +// GetConsistentLatestBlock retries until both servers agree on the latest block. 
+func GetConsistentLatestBlock(verbose int, server1URL, server2URL string, maxRetries int, retryDelay time.Duration) (uint64, error) {
+	client := NewClient("http", "", verbose)
+	var bn1, bn2 uint64
+
+	for i := 0; i < maxRetries; i++ {
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+
+		var err1, err2 error
+		bn1, _, err1 = GetLatestBlockNumber(ctx, client, server1URL)
+		bn2, _, err2 = GetLatestBlockNumber(ctx, client, server2URL)
+		cancel()
+
+		if verbose > 1 {
+			fmt.Printf("retry: %d nodes: %s, %s latest blocks: %d, %d\n", i+1, server1URL, server2URL, bn1, bn2)
+		}
+
+		if err1 == nil && err2 == nil && bn1 == bn2 {
+			return bn1, nil
+		}
+
+		if i < maxRetries-1 {
+			time.Sleep(retryDelay)
+		}
+	}
+
+	return 0, fmt.Errorf("nodes not synced, last values: %d / %d", bn1, bn2)
+}
diff --git a/internal/rpc/websocket.go b/internal/rpc/websocket.go
new file mode 100644
index 00000000..01da6002
--- /dev/null
+++ b/internal/rpc/websocket.go
@@ -0,0 +1,68 @@
+package rpc
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/gorilla/websocket"
+)
+
+// callWebSocket performs a single request/response exchange over a fresh
+// websocket connection, recording round-trip and unmarshalling times.
+func (c *Client) callWebSocket(target string, request []byte, response any) (Metrics, error) {
+	var metrics Metrics
+
+	wsTarget := "ws://" + target
+	dialer := websocket.Dialer{
+		HandshakeTimeout:  300 * time.Second,
+		EnableCompression: strings.HasSuffix(c.transport, "_comp"),
+	}
+
+	headers := http.Header{}
+	if c.jwtAuth != "" {
+		headers.Set("Authorization", c.jwtAuth)
+	}
+
+	conn, _, err := dialer.Dial(wsTarget, headers)
+	if err != nil {
+		if c.verbose > 0 {
+			fmt.Printf("\nwebsocket connection fail: %v\n", err)
+		}
+		return metrics, err
+	}
+	defer func() {
+		if cerr := conn.Close(); cerr != nil {
+			fmt.Printf("\nfailed to close websocket connection: %v\n", cerr)
+		}
+	}()
+
+	start := time.Now()
+	if err = conn.WriteMessage(websocket.BinaryMessage, request); err != nil {
+		if c.verbose > 0 {
+			fmt.Printf("\nwebsocket write fail: %v\n", err)
+		}
+		return metrics, err
+	}
+
+	_, message, err := conn.NextReader()
+	if err != nil {
+		if c.verbose > 0 {
+			fmt.Printf("\nwebsocket read fail: %v\n", err)
+		}
+		return metrics, err
+	}
+	metrics.RoundTripTime = time.Since(start)
+
+	unmarshalStart := time.Now()
+	if err = jsonAPI.NewDecoder(message).Decode(response); err != nil {
+		// ": %w" (not " %w") follows the Go convention for chained error text.
+		return metrics, fmt.Errorf("cannot decode websocket message as json: %w", err)
+	}
+	metrics.UnmarshallingTime = time.Since(unmarshalStart)
+
+	if c.verbose > 1 {
+		raw, _ := jsonAPI.Marshal(response)
+		fmt.Printf("Node: %s\nRequest: %s\nResponse: %v\n", target, request, string(raw))
+	}
+
+	return metrics, nil
+}
diff --git a/internal/runner/executor.go b/internal/runner/executor.go
new file mode 100644
index 00000000..0cada179
--- /dev/null
+++ b/internal/runner/executor.go
@@ -0,0 +1,179 @@
+package runner
+
+import (
+	"context"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang-jwt/jwt/v5"
+
+	"github.com/erigontech/rpc-tests/internal/compare"
+	"github.com/erigontech/rpc-tests/internal/config"
+	internalrpc "github.com/erigontech/rpc-tests/internal/rpc"
+	"github.com/erigontech/rpc-tests/internal/testdata"
+)
+
+// RunTest executes a single test and returns the outcome.
+// This is the v2 equivalent of v1's runTest + run methods.
+// The client parameter is a pre-created RPC client shared across tests (goroutine-safe).
+func RunTest(ctx context.Context, descriptor *testdata.TestDescriptor, cfg *config.Config, client *internalrpc.Client) testdata.TestOutcome {
+	jsonFilename := filepath.Join(cfg.JSONDir, descriptor.Name)
+
+	outcome := testdata.TestOutcome{}
+
+	// Extension sanitizing is only meaningful for archive fixtures; plain
+	// JSON fixtures always load with it disabled.
+	sanitize := testdata.IsArchive(jsonFilename) && cfg.SanitizeArchiveExt
+	commands, err := testdata.LoadFixture(jsonFilename, sanitize, &outcome.Metrics)
+	if err != nil {
+		outcome.Error = err
+		return outcome
+	}
+
+	if len(commands) != 1 {
+		outcome.Error = errors.New("expected exactly one JSON RPC command in " + jsonFilename)
+		return outcome
+	}
+
+	runCommand(ctx, cfg, &commands[0], descriptor, &outcome, client)
+	return outcome
+}
+
+// runCommand executes a single JSON-RPC command against the target.
+func runCommand(ctx context.Context, cfg *config.Config, cmd *testdata.JsonRpcCommand, descriptor *testdata.TestDescriptor, outcome *testdata.TestOutcome, baseClient *internalrpc.Client) {
+	transportType := descriptor.TransportType
+	jsonFile := descriptor.Name
+	request := cmd.Request
+
+	target := cfg.GetTarget(cfg.DaemonUnderTest, descriptor.Name)
+
+	// Use pre-created client; create per-test client only when JWT is needed (fresh iat per request)
+	client := baseClient
+	if cfg.JWTSecret != "" {
+		secretBytes, err := hex.DecodeString(cfg.JWTSecret)
+		if err != nil {
+			// A malformed secret would silently produce an unusable token and
+			// surface only as opaque auth failures: fail fast instead.
+			outcome.Error = fmt.Errorf("invalid JWT secret: %w", err)
+			return
+		}
+		token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
+			"iat": time.Now().Unix(),
+		})
+		tokenString, err := token.SignedString(secretBytes)
+		if err != nil {
+			outcome.Error = fmt.Errorf("cannot sign JWT token: %w", err)
+			return
+		}
+		client = internalrpc.NewClient(transportType, "Bearer "+tokenString, cfg.VerboseLevel)
+	}
+
+	outputAPIFilename, outputDirName, diffFile, daemonFile, expRspFile := compare.OutputFilePaths(cfg.OutputDir, jsonFile)
+
+	if !cfg.VerifyWithDaemon {
+		var result any
+		metrics, err := client.Call(ctx, target, request, &result)
+		outcome.Metrics.RoundTripTime += metrics.RoundTripTime
+		outcome.Metrics.UnmarshallingTime += metrics.UnmarshallingTime
+		if err != nil {
+			outcome.Error = err
+			return
+		}
+		if cfg.VerboseLevel > 2 {
+			fmt.Printf("%s: [%v]\n", cfg.DaemonUnderTest, result)
+		}
+
+		compare.ProcessResponse(result, nil, cmd.Response, cfg, cmd, outputDirName, daemonFile, expRspFile, diffFile, outcome)
+	} else {
+		target = cfg.GetTarget(config.DaemonOnDefaultPort, descriptor.Name)
+
+		var result any
+		metrics, err := client.Call(ctx, target, request, &result)
+		outcome.Metrics.RoundTripTime += metrics.RoundTripTime
+		outcome.Metrics.UnmarshallingTime += metrics.UnmarshallingTime
+		if err != nil {
+			outcome.Error = err
+			return
+		}
+		if cfg.VerboseLevel > 2 {
+			fmt.Printf("%s: [%v]\n", cfg.DaemonUnderTest, result)
+		}
+
+		target1 := cfg.GetTarget(cfg.DaemonAsReference, descriptor.Name)
+		var result1 any
+		metrics1, err := client.Call(ctx, target1, request, &result1)
+		outcome.Metrics.RoundTripTime += metrics1.RoundTripTime
+		outcome.Metrics.UnmarshallingTime += metrics1.UnmarshallingTime
+		if err != nil {
+			outcome.Error = err
+			return
+		}
+		if cfg.VerboseLevel > 2 {
+			fmt.Printf("%s: [%v]\n", cfg.DaemonAsReference, result1)
+		}
+
+		daemonFile = outputAPIFilename + config.GetJSONFilenameExt(config.DaemonOnDefaultPort, target)
+		expRspFile = outputAPIFilename + config.GetJSONFilenameExt(cfg.DaemonAsReference, target1)
+
+		compare.ProcessResponse(result, result1, nil, cfg, cmd, outputDirName, daemonFile, expRspFile, diffFile, outcome)
+	}
+}
+
+// mustAtoi converts a string to int, returning 0 on failure.
+func mustAtoi(s string) int {
+	if s == "" {
+		return 0
+	}
+	n, err := strconv.Atoi(s)
+	if err != nil {
+		return 0
+	}
+	return n
+}
+
+// IsStartTestReached checks if we've reached the start-from-test threshold.
+// Uses cfg.StartTestNum which is cached at config init time for zero-alloc lookup.
+func IsStartTestReached(cfg *config.Config, testNumber int) bool { + return cfg.StartTest == "" || testNumber >= cfg.StartTestNum +} + +// ShouldRunTest determines if a specific test should actually be executed. +// This encapsulates the v1 scheduling logic. +func ShouldRunTest(cfg *config.Config, testName string, testNumberInAnyLoop int) bool { + if cfg.TestingAPIsWith == "" && cfg.TestingAPIs == "" && (cfg.ReqTestNum == -1 || cfg.ReqTestNum == testNumberInAnyLoop) { + return true + } + if cfg.TestingAPIsWith != "" && checkTestNameForNumber(testName, cfg.ReqTestNum) { + return true + } + if cfg.TestingAPIs != "" && checkTestNameForNumber(testName, cfg.ReqTestNum) { + return true + } + return false +} + +// checkTestNameForNumber checks if a test filename like "test_01.json" matches a requested +// test number. Zero-alloc: extracts the number after the last "_" without regex. +func checkTestNameForNumber(testName string, reqTestNumber int) bool { + if reqTestNumber == -1 { + return true + } + // Find the last "_" to locate the number portion (e.g. 
"test_01.json" -> "01.json") + idx := strings.LastIndex(testName, "_") + if idx < 0 || idx+1 >= len(testName) { + return false + } + // Extract digits after "_", skip leading zeros + numStr := testName[idx+1:] + // Strip file extension and any non-digit suffix + end := 0 + for end < len(numStr) && numStr[end] >= '0' && numStr[end] <= '9' { + end++ + } + if end == 0 { + return false + } + n, err := strconv.Atoi(numStr[:end]) + if err != nil { + return false + } + return n == reqTestNumber +} diff --git a/internal/runner/runner.go b/internal/runner/runner.go new file mode 100644 index 00000000..7a96609c --- /dev/null +++ b/internal/runner/runner.go @@ -0,0 +1,285 @@ +package runner + +import ( + "bufio" + "context" + "errors" + "fmt" + "os" + "runtime" + "strings" + "sync" + "time" + + "github.com/erigontech/rpc-tests/internal/compare" + "github.com/erigontech/rpc-tests/internal/config" + "github.com/erigontech/rpc-tests/internal/filter" + internalrpc "github.com/erigontech/rpc-tests/internal/rpc" + "github.com/erigontech/rpc-tests/internal/testdata" +) + +// Run executes the full test suite matching v1 runMain behavior. 
+func Run(ctx context.Context, cancelCtx context.CancelFunc, cfg *config.Config) (int, error) {
+	startTime := time.Now()
+
+	if err := os.MkdirAll(cfg.OutputDir, 0755); err != nil {
+		return -1, err
+	}
+
+	// Print server endpoints
+	if cfg.Parallel {
+		fmt.Printf("Run tests in parallel on %s\n", cfg.ServerEndpoints())
+	} else {
+		fmt.Printf("Run tests in serial on %s\n", cfg.ServerEndpoints())
+	}
+
+	if strings.Contains(cfg.TransportType, "_comp") {
+		fmt.Println("Run tests using compression")
+	}
+
+	// Handle latest block sync for verify mode
+	if cfg.VerifyWithDaemon && cfg.TestsOnLatestBlock {
+		server1 := fmt.Sprintf("%s:%d", cfg.DaemonOnHost, cfg.ServerPort)
+		latestBlock, err := internalrpc.GetConsistentLatestBlock(
+			cfg.VerboseLevel, server1, cfg.ExternalProviderURL, 10, 1*time.Second)
+		if err != nil {
+			fmt.Println("sync on latest block number failed ", err)
+			return -1, err
+		}
+		if cfg.VerboseLevel > 0 {
+			fmt.Printf("Latest block number for %s, %s: %d\n", server1, cfg.ExternalProviderURL, latestBlock)
+		}
+	}
+
+	resultsAbsDir, err := cfg.ResultsAbsDir()
+	if err != nil {
+		return -1, err
+	}
+	fmt.Printf("Result directory: %s\n", resultsAbsDir)
+
+	// Create filter
+	f := filter.New(filter.FilterConfig{
+		Net:                cfg.Net,
+		ReqTestNum:         cfg.ReqTestNum,
+		TestingAPIs:        cfg.TestingAPIs,
+		TestingAPIsWith:    cfg.TestingAPIsWith,
+		ExcludeAPIList:     cfg.ExcludeAPIList,
+		ExcludeTestList:    cfg.ExcludeTestList,
+		TestsOnLatestBlock: cfg.TestsOnLatestBlock,
+		DoNotCompareError:  cfg.DoNotCompareError,
+	})
+
+	// Discover tests
+	discovery, err := testdata.DiscoverTests(cfg.JSONDir, cfg.ResultsDir)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error reading directory %s: %v\n", cfg.JSONDir, err)
+		return -1, err
+	}
+
+	// Worker pool setup
+	var wg sync.WaitGroup
+	testsChan := make(chan *testdata.TestDescriptor, 2000)
+	resultsChan := make(chan testdata.TestResult, 2000)
+
+	numWorkers := 1
+	if cfg.Parallel {
+		numWorkers = runtime.NumCPU()
+	}
+
+	// Pre-create one RPC client per transport type (Client is goroutine-safe)
+	clients := make(map[string]*internalrpc.Client)
+	for _, tt := range cfg.TransportTypes() {
+		clients[tt] = internalrpc.NewClient(tt, "", cfg.VerboseLevel)
+	}
+
+	// Start workers
+	for i := 0; i < numWorkers; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for {
+				select {
+				case test := <-testsChan:
+					if test == nil {
+						return
+					}
+					testOutcome := RunTest(ctx, test, cfg, clients[test.TransportType])
+					resultsChan <- testdata.TestResult{Outcome: testOutcome, Test: test}
+				case <-ctx.Done():
+					return
+				}
+			}
+		}()
+	}
+
+	// Results collector with buffered stdout
+	var resultsWg sync.WaitGroup
+	resultsWg.Add(1)
+	stats := &Stats{}
+	go func() {
+		defer resultsWg.Done()
+		w := bufio.NewWriterSize(os.Stdout, 64*1024)
+		defer w.Flush()
+		for {
+			select {
+			case result, ok := <-resultsChan:
+				if !ok {
+					return
+				}
+				file := fmt.Sprintf("%-60s", result.Test.Name)
+				tt := fmt.Sprintf("%-15s", result.Test.TransportType)
+				fmt.Fprintf(w, "%04d. %s::%s ", result.Test.Number, tt, file)
+
+				if result.Outcome.Success {
+					stats.AddSuccess(result.Outcome.Metrics)
+					if cfg.VerboseLevel > 0 {
+						fmt.Fprintln(w, "OK")
+					} else {
+						fmt.Fprint(w, "OK\r")
+					}
+				} else {
+					stats.AddFailure()
+					if result.Outcome.Error != nil {
+						fmt.Fprintf(w, "failed: %s\n", result.Outcome.Error.Error())
+						if errors.Is(result.Outcome.Error, compare.ErrDiffMismatch) && result.Outcome.ColoredDiff != "" {
+							fmt.Fprint(w, result.Outcome.ColoredDiff)
+						}
+					} else {
+						fmt.Fprintf(w, "failed: no error\n")
+					}
+					if cfg.ExitOnFail {
+						w.Flush()
+						cancelCtx()
+						return
+					}
+				}
+				w.Flush()
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+
+	// Main scheduling loop
+	testRep := 0
+	globalTestNumber := 0
+	availableTestedAPIs := discovery.TotalAPIs
+	scheduledIndex := 0
+
+	for testRep = 0; testRep < cfg.LoopNumber; testRep++ {
+		// NOTE: a bare break inside `select { case <-ctx.Done(): break }` exits
+		// only the select (SA4011), not this loop; check cancellation directly
+		// so a cancelled context actually stops new iterations.
+		if ctx.Err() != nil {
+			break
+		}
+
+		if cfg.LoopNumber != 1 {
+			fmt.Printf("\nTest iteration: %d\n", testRep+1)
+		}
+
+		transportTypes := cfg.TransportTypes()
+		for _, transportType := range transportTypes {
+			// Same fix as above: the original select/break was a no-op here.
+			if ctx.Err() != nil {
+				break
+			}
+
+			testNumberInAnyLoop := 1
+			globalTestNumber = 0
+
+			for _, tc := range discovery.Tests {
+				select {
+				case <-ctx.Done():
+					goto done
+				default:
+				}
+
+				globalTestNumber = tc.Number
+				currAPI := tc.APIName
+				jsonTestFullName := tc.Name
+				testName := strings.TrimPrefix(jsonTestFullName, currAPI+"/")
+				if idx := strings.LastIndex(jsonTestFullName, "/"); idx >= 0 {
+					testName = jsonTestFullName[idx+1:]
+				}
+
+				if f.APIUnderTest(currAPI, jsonTestFullName) {
+					if f.IsSkipped(currAPI, jsonTestFullName, testNumberInAnyLoop) {
+						if IsStartTestReached(cfg, testNumberInAnyLoop) {
+							if !cfg.DisplayOnlyFail && cfg.ReqTestNum == -1 {
+								file := fmt.Sprintf("%-60s", jsonTestFullName)
+								tt := fmt.Sprintf("%-15s", transportType)
+								fmt.Printf("%04d. %s::%s skipped\n", testNumberInAnyLoop, tt, file)
+							}
+							stats.SkippedTests++
+						}
+					} else {
+						shouldRun := ShouldRunTest(cfg, testName, testNumberInAnyLoop)
+
+						if shouldRun && IsStartTestReached(cfg, testNumberInAnyLoop) {
+							testDesc := &testdata.TestDescriptor{
+								Name:          jsonTestFullName,
+								Number:        testNumberInAnyLoop,
+								TransportType: transportType,
+								Index:         scheduledIndex,
+							}
+							scheduledIndex++
+							select {
+							case <-ctx.Done():
+								goto done
+							case testsChan <- testDesc:
+							}
+							stats.ScheduledTests++
+
+							if cfg.WaitingTime > 0 {
+								time.Sleep(time.Duration(cfg.WaitingTime) * time.Millisecond)
+							}
+						}
+					}
+				}
+
+				testNumberInAnyLoop++
+			}
+		}
+	}
+
+done:
+	// Close channels and wait
+	close(testsChan)
+	wg.Wait()
+	close(resultsChan)
+	resultsWg.Wait()
+
+	if stats.ScheduledTests == 0 && cfg.TestingAPIsWith != "" {
+		fmt.Printf("WARN: API filter %s selected no tests\n", cfg.TestingAPIsWith)
+	}
+
+	if cfg.ExitOnFail && stats.FailedTests > 0 {
+		fmt.Println("WARN: test sequence interrupted by failure (ExitOnFail)")
+	}
+
+	// Clean empty subfolders
+	if entries, err := os.ReadDir(cfg.OutputDir); err == nil {
+		for _, entry := range entries {
+			if !entry.IsDir() {
+				continue
+			}
+			subfolder := fmt.Sprintf("%s/%s", cfg.OutputDir, entry.Name())
+			if subEntries, err := os.ReadDir(subfolder); err == nil && len(subEntries) == 0 {
+				os.Remove(subfolder)
+			}
+		}
+	}
+
+	// Clean temp dir
+	os.RemoveAll(config.TempDirName)
+
+	// Print summary
+	elapsed := time.Since(startTime)
+	stats.PrintSummary(elapsed, testRep, availableTestedAPIs, globalTestNumber)
+
+	if stats.FailedTests > 0 {
+		return 1, nil
+	}
+	return 0, nil
+}
diff --git a/internal/runner/runner_bench_test.go b/internal/runner/runner_bench_test.go
new file mode 100644
index 00000000..c3de53fe
--- /dev/null
+++ b/internal/runner/runner_bench_test.go
@@ -0,0 +1,58 @@
+package runner
+
+import (
+	"testing"
+	"time"
+
+	"github.com/erigontech/rpc-tests/internal/config"
+	
"github.com/erigontech/rpc-tests/internal/testdata" +) + +func BenchmarkStats_AddSuccess(b *testing.B) { + metrics := testdata.TestMetrics{ + RoundTripTime: 100 * time.Millisecond, + MarshallingTime: 10 * time.Millisecond, + UnmarshallingTime: 20 * time.Millisecond, + ComparisonCount: 1, + EqualCount: 1, + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + s := &Stats{} + s.AddSuccess(metrics) + } +} + +func BenchmarkShouldRunTest_NoFilters(b *testing.B) { + cfg := config.NewConfig() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ShouldRunTest(cfg, "test_01.json", 1) + } +} + +func BenchmarkShouldRunTest_WithTestNumber(b *testing.B) { + cfg := config.NewConfig() + cfg.ReqTestNum = 5 + b.ResetTimer() + for i := 0; i < b.N; i++ { + ShouldRunTest(cfg, "test_05.json", 5) + } +} + +func BenchmarkCheckTestNameForNumber(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + checkTestNameForNumber("test_01.json", 1) + } +} + +func BenchmarkIsStartTestReached(b *testing.B) { + cfg := config.NewConfig() + cfg.StartTest = "100" + cfg.UpdateDirs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + IsStartTestReached(cfg, 50) + } +} diff --git a/internal/runner/runner_test.go b/internal/runner/runner_test.go new file mode 100644 index 00000000..498361f1 --- /dev/null +++ b/internal/runner/runner_test.go @@ -0,0 +1,149 @@ +package runner + +import ( + "testing" + "time" + + "github.com/erigontech/rpc-tests/internal/config" + "github.com/erigontech/rpc-tests/internal/testdata" +) + +func TestStats_AddSuccess(t *testing.T) { + s := &Stats{} + metrics := testdata.TestMetrics{ + RoundTripTime: 100 * time.Millisecond, + MarshallingTime: 10 * time.Millisecond, + UnmarshallingTime: 20 * time.Millisecond, + ComparisonCount: 1, + EqualCount: 1, + } + + s.AddSuccess(metrics) + s.AddSuccess(metrics) + + if s.SuccessTests != 2 { + t.Errorf("SuccessTests: got %d, want 2", s.SuccessTests) + } + if s.ExecutedTests != 2 { + t.Errorf("ExecutedTests: got %d, want 2", s.ExecutedTests) + } + 
if s.TotalRoundTripTime != 200*time.Millisecond { + t.Errorf("TotalRoundTripTime: got %v, want 200ms", s.TotalRoundTripTime) + } + if s.TotalComparisonCount != 2 { + t.Errorf("TotalComparisonCount: got %d, want 2", s.TotalComparisonCount) + } + if s.TotalEqualCount != 2 { + t.Errorf("TotalEqualCount: got %d, want 2", s.TotalEqualCount) + } +} + +func TestStats_AddFailure(t *testing.T) { + s := &Stats{} + s.AddFailure() + s.AddFailure() + + if s.FailedTests != 2 { + t.Errorf("FailedTests: got %d, want 2", s.FailedTests) + } + if s.ExecutedTests != 2 { + t.Errorf("ExecutedTests: got %d, want 2", s.ExecutedTests) + } +} + +func TestMustAtoi(t *testing.T) { + tests := []struct { + input string + want int + }{ + {"", 0}, + {"0", 0}, + {"1", 1}, + {"42", 42}, + {"abc", 0}, + } + + for _, tt := range tests { + got := mustAtoi(tt.input) + if got != tt.want { + t.Errorf("mustAtoi(%q): got %d, want %d", tt.input, got, tt.want) + } + } +} + +func TestIsStartTestReached(t *testing.T) { + cfg := config.NewConfig() + + // No start test set + if !IsStartTestReached(cfg, 1) { + t.Error("should return true when no start test is set") + } + + cfg.StartTest = "10" + cfg.UpdateDirs() + if IsStartTestReached(cfg, 5) { + t.Error("test 5 should not be reached when start is 10") + } + if !IsStartTestReached(cfg, 10) { + t.Error("test 10 should be reached when start is 10") + } + if !IsStartTestReached(cfg, 15) { + t.Error("test 15 should be reached when start is 10") + } +} + +func TestShouldRunTest_NoFilters(t *testing.T) { + cfg := config.NewConfig() + if !ShouldRunTest(cfg, "test_01.json", 1) { + t.Error("no filters should run all tests") + } +} + +func TestShouldRunTest_SpecificTestNumber(t *testing.T) { + cfg := config.NewConfig() + cfg.ReqTestNum = 5 + if ShouldRunTest(cfg, "test_01.json", 1) { + t.Error("test 1 should not run when ReqTestNum=5") + } + if !ShouldRunTest(cfg, "test_01.json", 5) { + t.Error("test 5 should run when ReqTestNum=5") + } +} + +func 
TestShouldRunTest_WithAPIPatternFilter(t *testing.T) { + cfg := config.NewConfig() + cfg.TestingAPIsWith = "eth_" + // When TestingAPIsWith is set but no specific test number, should run + if !ShouldRunTest(cfg, "test_01.json", 1) { + t.Error("should run when API pattern matches and no test number filter") + } +} + +func TestShouldRunTest_WithExactAPIFilter(t *testing.T) { + cfg := config.NewConfig() + cfg.TestingAPIs = "eth_call" + if !ShouldRunTest(cfg, "test_01.json", 1) { + t.Error("should run when exact API matches and no test number filter") + } +} + +func TestCheckTestNameForNumber(t *testing.T) { + tests := []struct { + name string + num int + want bool + }{ + {"test_01.json", 1, true}, + {"test_01.json", 2, false}, + {"test_10.json", 10, true}, + {"test_10.json", 1, false}, + {"test_01.json", -1, true}, + } + + for _, tt := range tests { + got := checkTestNameForNumber(tt.name, tt.num) + if got != tt.want { + t.Errorf("checkTestNameForNumber(%q, %d): got %v, want %v", tt.name, tt.num, got, tt.want) + } + } +} diff --git a/internal/runner/stats.go b/internal/runner/stats.go new file mode 100644 index 00000000..74b7d32e --- /dev/null +++ b/internal/runner/stats.go @@ -0,0 +1,59 @@ +package runner + +import ( + "fmt" + "time" + + "github.com/erigontech/rpc-tests/internal/testdata" +) + +// Stats aggregates metrics and counts across all tests. +type Stats struct { + SuccessTests int + FailedTests int + ExecutedTests int + SkippedTests int + ScheduledTests int + + TotalRoundTripTime time.Duration + TotalMarshallingTime time.Duration + TotalUnmarshallingTime time.Duration + TotalComparisonCount int + TotalEqualCount int +} + +// AddSuccess records a successful test result. 
+func (s *Stats) AddSuccess(metrics testdata.TestMetrics) { + s.SuccessTests++ + s.ExecutedTests++ + s.TotalRoundTripTime += metrics.RoundTripTime + s.TotalMarshallingTime += metrics.MarshallingTime + s.TotalUnmarshallingTime += metrics.UnmarshallingTime + s.TotalComparisonCount += metrics.ComparisonCount + s.TotalEqualCount += metrics.EqualCount +} + +// AddFailure records a failed test result. +func (s *Stats) AddFailure() { + s.FailedTests++ + s.ExecutedTests++ +} + +// PrintSummary prints the v1-compatible summary output. +func (s *Stats) PrintSummary(elapsed time.Duration, iterations, totalAPIs, totalTests int) { + fmt.Println("\n ") + fmt.Printf("Total HTTP round-trip time: %v\n", s.TotalRoundTripTime) + fmt.Printf("Total Marshalling time: %v\n", s.TotalMarshallingTime) + fmt.Printf("Total Unmarshalling time: %v\n", s.TotalUnmarshallingTime) + fmt.Printf("Total Comparison count: %v\n", s.TotalComparisonCount) + fmt.Printf("Total Equal count: %v\n", s.TotalEqualCount) + fmt.Printf("Test session duration: %v\n", elapsed) + fmt.Printf("Test session iterations: %d\n", iterations) + fmt.Printf("Test suite total APIs: %d\n", totalAPIs) + fmt.Printf("Test suite total tests: %d\n", totalTests) + fmt.Printf("Number of skipped tests: %d\n", s.SkippedTests) + fmt.Printf("Number of selected tests: %d\n", s.ScheduledTests) + fmt.Printf("Number of executed tests: %d\n", s.ExecutedTests) + fmt.Printf("Number of success tests: %d\n", s.SuccessTests) + fmt.Printf("Number of failed tests: %d\n", s.FailedTests) +} diff --git a/internal/testdata/discovery.go b/internal/testdata/discovery.go new file mode 100644 index 00000000..2debc3fd --- /dev/null +++ b/internal/testdata/discovery.go @@ -0,0 +1,99 @@ +package testdata + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" +) + +var numberRe = regexp.MustCompile(`\d+`) + +// ExtractNumber extracts the first number from a filename for sorting. 
+func ExtractNumber(filename string) int { + match := numberRe.FindString(filename) + if match != "" { + num, _ := strconv.Atoi(match) + return num + } + return 0 +} + +// validTestExtensions lists the file extensions accepted as test fixtures. +var validTestExtensions = map[string]bool{ + ".json": true, + ".tar": true, + ".zip": true, + ".gzip": true, +} + +// DiscoverTests scans the test directory and returns all test cases with global numbering. +// The global numbering matches v1 exactly: alphabetical API dirs, numeric sort within API, +// global counter increments for every valid test file regardless of filtering. +func DiscoverTests(jsonDir, resultsDir string) (*DiscoveryResult, error) { + dirs, err := os.ReadDir(jsonDir) + if err != nil { + return nil, fmt.Errorf("error reading directory %s: %w", jsonDir, err) + } + + sort.Slice(dirs, func(i, j int) bool { + return dirs[i].Name() < dirs[j].Name() + }) + + result := &DiscoveryResult{} + globalTestNumber := 0 + + for _, dirEntry := range dirs { + apiName := dirEntry.Name() + + // Skip results folder and hidden folders + if apiName == resultsDir || strings.HasPrefix(apiName, ".") { + continue + } + + testDir := filepath.Join(jsonDir, apiName) + info, err := os.Stat(testDir) + if err != nil || !info.IsDir() { + continue + } + + result.TotalAPIs++ + + testEntries, err := os.ReadDir(testDir) + if err != nil { + continue + } + + // Sort test files by number (matching v1 extractNumber sort) + sort.Slice(testEntries, func(i, j int) bool { + return ExtractNumber(testEntries[i].Name()) < ExtractNumber(testEntries[j].Name()) + }) + + for _, testEntry := range testEntries { + testName := testEntry.Name() + + if !strings.HasPrefix(testName, "test_") { + continue + } + + ext := filepath.Ext(testName) + if !validTestExtensions[ext] { + continue + } + + globalTestNumber++ + + result.Tests = append(result.Tests, TestCase{ + Name: filepath.Join(apiName, testName), + Number: globalTestNumber, + APIName: apiName, + }) + } + } + 
+ result.TotalTests = globalTestNumber + return result, nil +} diff --git a/internal/testdata/discovery_bench_test.go b/internal/testdata/discovery_bench_test.go new file mode 100644 index 00000000..8ba76012 --- /dev/null +++ b/internal/testdata/discovery_bench_test.go @@ -0,0 +1,21 @@ +package testdata + +import "testing" + +func BenchmarkExtractNumber(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + ExtractNumber("test_01.json") + ExtractNumber("test_10.tar.gz") + ExtractNumber("test_99.tar.bz2") + } +} + +func BenchmarkIsArchive(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + IsArchive("test_01.json") + IsArchive("test_01.tar.gz") + IsArchive("test_01.tar.bz2") + } +} diff --git a/internal/testdata/discovery_test.go b/internal/testdata/discovery_test.go new file mode 100644 index 00000000..e93185e9 --- /dev/null +++ b/internal/testdata/discovery_test.go @@ -0,0 +1,234 @@ +package testdata + +import ( + "os" + "path/filepath" + "testing" +) + +func TestExtractNumber(t *testing.T) { + tests := []struct { + input string + want int + }{ + {"test_01.json", 1}, + {"test_10.json", 10}, + {"test_100.tar", 100}, + {"test_001.gzip", 1}, + {"no_number.json", 0}, + {"", 0}, + } + + for _, tt := range tests { + got := ExtractNumber(tt.input) + if got != tt.want { + t.Errorf("ExtractNumber(%q): got %d, want %d", tt.input, got, tt.want) + } + } +} + +func TestIsArchive(t *testing.T) { + tests := []struct { + input string + want bool + }{ + {"test_01.json", false}, + {"test_01.tar", true}, + {"test_01.gzip", true}, + {"test_01.tar.gz", true}, + {"test_01.tar.bz2", true}, + } + + for _, tt := range tests { + got := IsArchive(tt.input) + if got != tt.want { + t.Errorf("IsArchive(%q): got %v, want %v", tt.input, got, tt.want) + } + } +} + +func setupTestDir(t *testing.T) string { + t.Helper() + dir := t.TempDir() + + // Create API dirs with test files + apis := []struct { + name string + tests []string + }{ + {"eth_call", []string{"test_01.json", 
"test_02.json", "test_10.json"}}, + {"eth_getBalance", []string{"test_01.json"}}, + {"debug_traceCall", []string{"test_01.json", "test_02.json"}}, + } + + for _, api := range apis { + apiDir := filepath.Join(dir, api.name) + if err := os.MkdirAll(apiDir, 0755); err != nil { + t.Fatal(err) + } + for _, test := range api.tests { + content := `[{"request":{"jsonrpc":"2.0","method":"` + api.name + `","params":[],"id":1},"response":{"jsonrpc":"2.0","id":1,"result":"0x0"}}]` + if err := os.WriteFile(filepath.Join(apiDir, test), []byte(content), 0644); err != nil { + t.Fatal(err) + } + } + } + + // Add results dir (should be skipped) + if err := os.MkdirAll(filepath.Join(dir, "results"), 0755); err != nil { + t.Fatal(err) + } + + // Add hidden dir (should be skipped) + if err := os.MkdirAll(filepath.Join(dir, ".hidden"), 0755); err != nil { + t.Fatal(err) + } + + // Add a non-test file (should be skipped) + apiDir := filepath.Join(dir, "eth_call") + if err := os.WriteFile(filepath.Join(apiDir, "README.md"), []byte("test"), 0644); err != nil { + t.Fatal(err) + } + + return dir +} + +func TestDiscoverTests(t *testing.T) { + dir := setupTestDir(t) + + result, err := DiscoverTests(dir, "results") + if err != nil { + t.Fatalf("DiscoverTests: %v", err) + } + + if result.TotalAPIs != 3 { + t.Errorf("TotalAPIs: got %d, want 3", result.TotalAPIs) + } + + // debug_traceCall(2) + eth_call(3) + eth_getBalance(1) = 6 total tests + if result.TotalTests != 6 { + t.Errorf("TotalTests: got %d, want 6", result.TotalTests) + } + + if len(result.Tests) != 6 { + t.Fatalf("len(Tests): got %d, want 6", len(result.Tests)) + } + + // Verify alphabetical API order + expectedAPIs := []string{"debug_traceCall", "debug_traceCall", "eth_call", "eth_call", "eth_call", "eth_getBalance"} + for i, tc := range result.Tests { + if tc.APIName != expectedAPIs[i] { + t.Errorf("test[%d] API: got %q, want %q", i, tc.APIName, expectedAPIs[i]) + } + } + + // Verify global numbering is sequential + for i, tc := 
range result.Tests { + if tc.Number != i+1 { + t.Errorf("test[%d] Number: got %d, want %d", i, tc.Number, i+1) + } + } +} + +func TestDiscoverTests_NumericSort(t *testing.T) { + dir := setupTestDir(t) + + result, err := DiscoverTests(dir, "results") + if err != nil { + t.Fatalf("DiscoverTests: %v", err) + } + + // eth_call tests should be sorted: test_01, test_02, test_10 (numeric, not lexicographic) + ethCallTests := []TestCase{} + for _, tc := range result.Tests { + if tc.APIName == "eth_call" { + ethCallTests = append(ethCallTests, tc) + } + } + + if len(ethCallTests) != 3 { + t.Fatalf("eth_call tests: got %d, want 3", len(ethCallTests)) + } + + expectedNames := []string{ + "eth_call/test_01.json", + "eth_call/test_02.json", + "eth_call/test_10.json", + } + for i, tc := range ethCallTests { + // Normalize path separator for comparison + got := filepath.ToSlash(tc.Name) + if got != expectedNames[i] { + t.Errorf("eth_call test[%d]: got %q, want %q", i, got, expectedNames[i]) + } + } +} + +func TestDiscoverTests_EmptyDir(t *testing.T) { + dir := t.TempDir() + + result, err := DiscoverTests(dir, "results") + if err != nil { + t.Fatalf("DiscoverTests: %v", err) + } + + if result.TotalAPIs != 0 { + t.Errorf("TotalAPIs: got %d, want 0", result.TotalAPIs) + } + if result.TotalTests != 0 { + t.Errorf("TotalTests: got %d, want 0", result.TotalTests) + } +} + +func TestDiscoverTests_NonexistentDir(t *testing.T) { + _, err := DiscoverTests("/nonexistent/path", "results") + if err == nil { + t.Error("expected error for nonexistent directory") + } +} + +func TestLoadFixture_JSON(t *testing.T) { + dir := setupTestDir(t) + metrics := &TestMetrics{} + + commands, err := LoadFixture(filepath.Join(dir, "eth_call", "test_01.json"), false, metrics) + if err != nil { + t.Fatalf("LoadFixture: %v", err) + } + + if len(commands) != 1 { + t.Fatalf("commands: got %d, want 1", len(commands)) + } + + if commands[0].Request == nil { + t.Error("request should not be nil") + } + if 
commands[0].Response == nil { + t.Error("response should not be nil") + } + if metrics.UnmarshallingTime == 0 { + t.Error("UnmarshallingTime should be > 0") + } +} + +func TestLoadFixture_FileNotFound(t *testing.T) { + metrics := &TestMetrics{} + _, err := LoadFixture("/nonexistent/path.json", false, metrics) + if err == nil { + t.Error("expected error for nonexistent file") + } +} + +func TestLoadFixture_InvalidJSON(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "bad.json") + if err := os.WriteFile(path, []byte("not json"), 0644); err != nil { + t.Fatal(err) + } + + metrics := &TestMetrics{} + _, err := LoadFixture(path, false, metrics) + if err == nil { + t.Error("expected error for invalid JSON") + } +} diff --git a/internal/testdata/loader.go b/internal/testdata/loader.go new file mode 100644 index 00000000..da5589f5 --- /dev/null +++ b/internal/testdata/loader.go @@ -0,0 +1,72 @@ +package testdata + +import ( + "archive/tar" + "bufio" + "errors" + "fmt" + "os" + "strings" + "time" + + jsoniter "github.com/json-iterator/go" + + "github.com/erigontech/rpc-tests/cmd/integration/archive" +) + +var json = jsoniter.ConfigCompatibleWithStandardLibrary + +// IsArchive returns true if the file is not a plain .json file. +func IsArchive(filename string) bool { + return !strings.HasSuffix(filename, ".json") +} + +// LoadFixture loads JSON-RPC commands from a test fixture file. +// Supports .json, .tar, .tar.gz, .tar.bz2 formats via the archive package. +func LoadFixture(path string, sanitizeExt bool, metrics *TestMetrics) ([]JsonRpcCommand, error) { + if IsArchive(path) { + return extractJsonCommands(path, sanitizeExt, metrics) + } + return readJsonCommands(path, metrics) +} + +// readJsonCommands reads JSON-RPC commands from a plain JSON file. 
+func readJsonCommands(path string, metrics *TestMetrics) ([]JsonRpcCommand, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("cannot open file %s: %w", path, err) + } + defer func() { + if cerr := file.Close(); cerr != nil { + fmt.Printf("failed to close file %s: %v\n", path, cerr) + } + }() + + reader := bufio.NewReaderSize(file, 8*os.Getpagesize()) + + var commands []JsonRpcCommand + start := time.Now() + if err := json.NewDecoder(reader).Decode(&commands); err != nil { + return nil, fmt.Errorf("cannot parse JSON %s: %w", path, err) + } + metrics.UnmarshallingTime += time.Since(start) + return commands, nil +} + +// extractJsonCommands reads JSON-RPC commands from an archive file. +func extractJsonCommands(path string, sanitizeExt bool, metrics *TestMetrics) ([]JsonRpcCommand, error) { + var commands []JsonRpcCommand + err := archive.Extract(path, sanitizeExt, func(reader *tar.Reader) error { + bufferedReader := bufio.NewReaderSize(reader, 8*os.Getpagesize()) + start := time.Now() + if err := json.NewDecoder(bufferedReader).Decode(&commands); err != nil { + return fmt.Errorf("failed to decode JSON: %w", err) + } + metrics.UnmarshallingTime += time.Since(start) + return nil + }) + if err != nil { + return nil, errors.New("cannot extract archive file " + path) + } + return commands, nil +} diff --git a/internal/testdata/types.go b/internal/testdata/types.go new file mode 100644 index 00000000..bbfcc55c --- /dev/null +++ b/internal/testdata/types.go @@ -0,0 +1,79 @@ +package testdata + +import ( + "time" + + jsoniter "github.com/json-iterator/go" +) + +// TestCase represents a discovered test file with its global numbering. +type TestCase struct { + Name string // Relative path: "api_name/test_NN.json" + Number int // Global test number (1-based, across all APIs) + APIName string // API directory name + TransportType string // Assigned at scheduling time +} + +// TestDescriptor is a scheduled test sent to workers. 
+type TestDescriptor struct { + Name string + Number int + TransportType string + Index int // Position in scheduled order (for ordered output) +} + +// TestResult holds a test outcome and its descriptor. +type TestResult struct { + Outcome TestOutcome + Test *TestDescriptor +} + +// TestOutcome holds the result of executing a single test. +type TestOutcome struct { + Success bool + Error error + ColoredDiff string + Metrics TestMetrics +} + +// TestMetrics tracks timing and comparison statistics for a single test. +type TestMetrics struct { + RoundTripTime time.Duration + MarshallingTime time.Duration + UnmarshallingTime time.Duration + ComparisonCount int + EqualCount int +} + +// JsonRpcResponseMetadata holds metadata about the expected response. +type JsonRpcResponseMetadata struct { + PathOptions jsoniter.RawMessage `json:"pathOptions"` +} + +// JsonRpcTestMetadata holds metadata about the test request/response. +type JsonRpcTestMetadata struct { + Request interface{} `json:"request"` + Response *JsonRpcResponseMetadata `json:"response"` +} + +// JsonRpcTest holds test-level information (identifier, description, metadata). +type JsonRpcTest struct { + Identifier string `json:"id"` + Reference string `json:"reference"` + Description string `json:"description"` + Metadata *JsonRpcTestMetadata `json:"metadata"` +} + +// JsonRpcCommand represents a single JSON-RPC command in a test fixture. +type JsonRpcCommand struct { + Request jsoniter.RawMessage `json:"request"` + Response any `json:"response"` + TestInfo *JsonRpcTest `json:"test"` +} + +// DiscoveryResult holds the results of test discovery. 
+type DiscoveryResult struct { + Tests []TestCase + TotalAPIs int + TotalTests int // Global test count (including non-matching tests) +} From c74d0b62a443efef99dfcc2cea38dbcc9796e2bc Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 13 Feb 2026 19:12:12 +0100 Subject: [PATCH 60/87] add golangci-lint config and fix all lint issues in internal/ Add .golangci.yml (v2 format) with 19 linters across 4 categories: bugs, simplification, performance, and style. Legacy cmd/ code is excluded. Add post-build Claude Code hook to run lint automatically. Fix all 20+ lint issues: modernize interface{} to any, use error wrapping (%w), errors.Is for EOF checks, slices.Contains, range-over-int loops, b.Loop() in benchmarks, fix ineffective break statements in runner scheduling loop, and replace empty SA9003 branch with proper error handling. Co-Authored-By: Claude Opus 4.6 --- .claude/hooks/lint-after-build.sh | 19 +++++ .claude/settings.json | 17 ++++ .golangci.yml | 97 +++++++++++++++++++++++ internal/compare/comparator.go | 35 ++++---- internal/compare/comparator_bench_test.go | 40 +++++----- internal/compare/comparator_test.go | 52 ++++++------ internal/config/config.go | 3 +- internal/filter/filter.go | 15 +--- internal/filter/filter_bench_test.go | 14 ++-- internal/perf/perf_bench_test.go | 12 +-- internal/perf/perf_test.go | 7 +- internal/perf/report.go | 17 ++-- internal/perf/sequence.go | 3 +- internal/perf/vegeta.go | 2 +- internal/rpc/client_bench_test.go | 8 +- internal/rpc/client_test.go | 8 +- internal/rpc/http.go | 14 ++-- internal/runner/runner.go | 10 +-- internal/runner/runner_bench_test.go | 10 +-- internal/testdata/types.go | 2 +- 20 files changed, 254 insertions(+), 131 deletions(-) create mode 100755 .claude/hooks/lint-after-build.sh create mode 100644 .claude/settings.json create mode 100644 .golangci.yml diff --git a/.claude/hooks/lint-after-build.sh b/.claude/hooks/lint-after-build.sh new file mode 100755 index 
00000000..13e22325 --- /dev/null +++ b/.claude/hooks/lint-after-build.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Runs golangci-lint after go build commands. + +INPUT=$(cat) + +COMMAND=$(echo "$INPUT" | jq -r '.tool_input.command // empty') + +if echo "$COMMAND" | grep -q 'go build'; then + cd "$CLAUDE_PROJECT_DIR" || exit 0 + OUTPUT=$(golangci-lint run ./... 2>&1) + EXIT_CODE=$? + if [ $EXIT_CODE -ne 0 ]; then + echo "golangci-lint found issues:" >&2 + echo "$OUTPUT" >&2 + exit 2 + fi +fi + +exit 0 \ No newline at end of file diff --git a/.claude/settings.json b/.claude/settings.json new file mode 100644 index 00000000..fbcaddb0 --- /dev/null +++ b/.claude/settings.json @@ -0,0 +1,17 @@ +{ + "hooks": { + "PostToolUse": [ + { + "matcher": "Bash", + "hooks": [ + { + "type": "command", + "command": "\"$CLAUDE_PROJECT_DIR\"/.claude/hooks/lint-after-build.sh", + "timeout": 120, + "statusMessage": "Running golangci-lint..." + } + ] + } + ] + } +} \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 00000000..dd00531f --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,97 @@ +version: "2" + +linters: + default: none + enable: + # Bugs & correctness + - errcheck + - govet + - staticcheck + - ineffassign + - unused + - bodyclose + - durationcheck + - errorlint + - nilerr + + # Code simplification + - intrange + - copyloopvar + - modernize + + # Performance + - prealloc + + # Style consistency + - misspell + - unconvert + - wastedassign + + settings: + errcheck: + exclude-functions: + - fmt.Fprintf + - fmt.Fprintln + - fmt.Fprint + - fmt.Printf + - fmt.Println + - fmt.Print + - (io.Closer).Close + - (*os.File).Close + - (*bufio.Writer).Flush + - (*bufio.Writer).Write + - (net/http.ResponseWriter).Write + - (*encoding/json.Encoder).Encode + - os.Remove + - os.RemoveAll + - os.MkdirAll + govet: + enable-all: true + disable: + - fieldalignment + - shadow # Too noisy, many false positives with err shadowing + staticcheck: + checks: + - "all" + - 
"-QF*" # Disable quickfix suggestions + - "-ST1003" # Naming conventions: renaming exports is a breaking change + errorlint: + asserts: false + + exclusions: + presets: + - comments + - std-error-handling + rules: + # Test files: relax errcheck + - linters: [errcheck] + path: _test\.go + # Legacy cmd/ code - not part of v2 refactor + - path: cmd/archive/ + linters: [errcheck, ineffassign, wastedassign, staticcheck, govet] + - path: cmd/integration/main\.go + linters: [errcheck, errorlint] + - path: cmd/integration/jsondiff/ + linters: [ineffassign, modernize, intrange] + - path: cmd/integration/archive/ + linters: [errorlint, intrange, modernize] + - path: cmd/integration/jsondiff/ + linters: [wastedassign] + # bodyclose false positives on websocket dial and raw HTTP handler + - path: internal/rpc/websocket\.go + linters: [bodyclose] + - path: internal/rpc/http\.go + text: "response body must be closed" + linters: [bodyclose] + +formatters: + enable: + - gofmt + - goimports + +output: + sort-order: + - file + +run: + timeout: 5m diff --git a/internal/compare/comparator.go b/internal/compare/comparator.go index 2dd95222..e7cf01ff 100644 --- a/internal/compare/comparator.go +++ b/internal/compare/comparator.go @@ -75,8 +75,8 @@ func ProcessResponse( } // Check "don't care" conditions - responseMap, respIsMap := response.(map[string]interface{}) - expectedMap, expIsMap := expectedResponse.(map[string]interface{}) + responseMap, respIsMap := response.(map[string]any) + expectedMap, expIsMap := expectedResponse.(map[string]any) if respIsMap && expIsMap { _, responseHasResult := responseMap["result"] expectedResult, expectedHasResult := expectedMap["result"] @@ -190,13 +190,13 @@ func ProcessResponse( // compareResponses does a fast structural equality check. 
func compareResponses(lhs, rhs any) bool { - leftMap, leftIsMap := lhs.(map[string]interface{}) - rightMap, rightIsMap := rhs.(map[string]interface{}) + leftMap, leftIsMap := lhs.(map[string]any) + rightMap, rightIsMap := rhs.(map[string]any) if leftIsMap && rightIsMap { return mapsEqual(leftMap, rightMap) } - leftArray, leftIsArray := lhs.([]map[string]interface{}) - rightArray, rightIsArray := rhs.([]map[string]interface{}) + leftArray, leftIsArray := lhs.([]map[string]any) + rightArray, rightIsArray := rhs.([]map[string]any) if leftIsArray && rightIsArray { return arrayEqual(leftArray, rightArray) } @@ -204,7 +204,7 @@ func compareResponses(lhs, rhs any) bool { } // jsonValuesEqual compares two JSON-decoded values without reflection for common types. -// JSON only produces: string, float64, bool, nil, map[string]interface{}, []interface{}. +// JSON only produces: string, float64, bool, nil, map[string]any, []any. func jsonValuesEqual(lhs, rhs any) bool { if lhs == nil && rhs == nil { return true @@ -222,11 +222,11 @@ func jsonValuesEqual(lhs, rhs any) bool { case bool: r, ok := rhs.(bool) return ok && l == r - case map[string]interface{}: - r, ok := rhs.(map[string]interface{}) + case map[string]any: + r, ok := rhs.(map[string]any) return ok && mapsEqual(l, r) - case []interface{}: - r, ok := rhs.([]interface{}) + case []any: + r, ok := rhs.([]any) if !ok || len(l) != len(r) { return false } @@ -241,7 +241,7 @@ func jsonValuesEqual(lhs, rhs any) bool { } } -func mapsEqual(lhs, rhs map[string]interface{}) bool { +func mapsEqual(lhs, rhs map[string]any) bool { if len(lhs) != len(rhs) { return false } @@ -254,7 +254,7 @@ func mapsEqual(lhs, rhs map[string]interface{}) bool { return true } -func arrayEqual(lhs, rhs []map[string]interface{}) bool { +func arrayEqual(lhs, rhs []map[string]any) bool { if len(lhs) != len(rhs) { return false } @@ -281,7 +281,7 @@ func marshalToFile(value any, filename string, metrics *testdata.TestMetrics) er metrics.MarshallingTime += 
time.Since(start) if err := os.WriteFile(filename, buf.Bytes(), 0644); err != nil { - return fmt.Errorf("exception on file write: %v", err) + return fmt.Errorf("exception on file write: %w", err) } return nil } @@ -293,7 +293,7 @@ func dumpJSONs(dump bool, daemonFile, expRspFile, outputDir string, response, ex } if err := os.MkdirAll(outputDir, 0755); err != nil { - return fmt.Errorf("exception on makedirs: %s %v", outputDir, err) + return fmt.Errorf("exception on makedirs: %s %w", outputDir, err) } if daemonFile != "" { @@ -403,8 +403,9 @@ func runExternalCompare(useJsonDiff bool, errorFile, file1, file2, diffFile stri cmd := exec.CommandContext(ctx, "sh", "-c", cmdStr) if err := cmd.Run(); err != nil { // diff returns 1 when files differ, which is not an error for us - if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 1 && !useJsonDiff { - // diff found differences + var exitErr *exec.ExitError + if !(errors.As(err, &exitErr) && exitErr.ExitCode() == 1 && !useJsonDiff) { + return false, fmt.Errorf("external compare command failed: %w", err) } } diff --git a/internal/compare/comparator_bench_test.go b/internal/compare/comparator_bench_test.go index 13d57d89..21ca8ea7 100644 --- a/internal/compare/comparator_bench_test.go +++ b/internal/compare/comparator_bench_test.go @@ -9,28 +9,28 @@ import ( ) func BenchmarkCompareResponses_EqualMaps(b *testing.B) { - a := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} - c := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + a := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + c := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { compareResponses(a, c) } } func BenchmarkCompareResponses_DifferentMaps(b *testing.B) { - a := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} - c := map[string]interface{}{"jsonrpc": "2.0", "id": 
float64(1), "result": "0x2"} + a := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + c := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { compareResponses(a, c) } } func BenchmarkCompareResponses_LargeMap(b *testing.B) { - makeMap := func(n int) map[string]interface{} { - m := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1)} - result := make(map[string]interface{}, n) - for j := 0; j < n; j++ { + makeMap := func(n int) map[string]any { + m := map[string]any{"jsonrpc": "2.0", "id": float64(1)} + result := make(map[string]any, n) + for j := range n { result[string(rune('a'+j%26))+string(rune('0'+j/26))] = float64(j) } m["result"] = result @@ -39,7 +39,7 @@ func BenchmarkCompareResponses_LargeMap(b *testing.B) { a := makeMap(100) c := makeMap(100) b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { compareResponses(a, c) } } @@ -47,11 +47,11 @@ func BenchmarkCompareResponses_LargeMap(b *testing.B) { func BenchmarkProcessResponse_ExactMatch(b *testing.B) { dir := b.TempDir() cfg := config.NewConfig() - response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} - expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { outcome := &testdata.TestOutcome{} cmd := &testdata.JsonRpcCommand{} ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) @@ -67,11 +67,11 @@ func BenchmarkProcessResponse_DiffMismatch_JsonDiffGo(b *testing.B) { expRspFile := filepath.Join(dir, "expected.json") diffFile := filepath.Join(dir, "diff.json") - response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} - expected := 
map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} + response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { outcome := &testdata.TestOutcome{} cmd := &testdata.JsonRpcCommand{} ProcessResponse(response, nil, expected, cfg, cmd, dir, daemonFile, expRspFile, diffFile, outcome) @@ -82,11 +82,11 @@ func BenchmarkDumpJSONs(b *testing.B) { dir := b.TempDir() daemonFile := filepath.Join(dir, "daemon.json") expRspFile := filepath.Join(dir, "expected.json") - response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} - expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} + response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { metrics := &testdata.TestMetrics{} dumpJSONs(true, daemonFile, expRspFile, dir, response, expected, metrics) } diff --git a/internal/compare/comparator_test.go b/internal/compare/comparator_test.go index 7bd4e7c6..895a8611 100644 --- a/internal/compare/comparator_test.go +++ b/internal/compare/comparator_test.go @@ -10,32 +10,32 @@ import ( ) func TestCompareResponses_EqualMaps(t *testing.T) { - a := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} - b := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + a := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + b := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} if !compareResponses(a, b) { t.Error("identical maps should be equal") } } func TestCompareResponses_DifferentMaps(t *testing.T) { - a := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} - b := 
map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} + a := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + b := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} if compareResponses(a, b) { t.Error("different maps should not be equal") } } func TestCompareResponses_DifferentLengths(t *testing.T) { - a := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1)} - b := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + a := map[string]any{"jsonrpc": "2.0", "id": float64(1)} + b := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} if compareResponses(a, b) { t.Error("maps with different lengths should not be equal") } } func TestCompareResponses_EqualArrays(t *testing.T) { - a := []map[string]interface{}{{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"}} - b := []map[string]interface{}{{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"}} + a := []map[string]any{{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"}} + b := []map[string]any{{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"}} if !compareResponses(a, b) { t.Error("identical arrays should be equal") } @@ -48,8 +48,8 @@ func TestProcessResponse_WithoutCompare(t *testing.T) { outcome := &testdata.TestOutcome{} cmd := &testdata.JsonRpcCommand{} - response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} - expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} + response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) @@ -64,8 +64,8 @@ func TestProcessResponse_ExactMatch(t *testing.T) { outcome := &testdata.TestOutcome{} cmd := &testdata.JsonRpcCommand{} - response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} - 
expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) @@ -83,8 +83,8 @@ func TestProcessResponse_NullExpectedResult(t *testing.T) { outcome := &testdata.TestOutcome{} cmd := &testdata.JsonRpcCommand{} - response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0xabc"} - expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": nil} + response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0xabc"} + expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": nil} ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) @@ -99,8 +99,8 @@ func TestProcessResponse_NullExpectedError(t *testing.T) { outcome := &testdata.TestOutcome{} cmd := &testdata.JsonRpcCommand{} - response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "error": map[string]interface{}{"code": float64(-32000), "message": "some error"}} - expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "error": nil} + response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "error": map[string]any{"code": float64(-32000), "message": "some error"}} + expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "error": nil} ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) @@ -115,8 +115,8 @@ func TestProcessResponse_EmptyExpected(t *testing.T) { outcome := &testdata.TestOutcome{} cmd := &testdata.JsonRpcCommand{} - response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} - expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1)} + response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := 
map[string]any{"jsonrpc": "2.0", "id": float64(1)} ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) @@ -132,8 +132,8 @@ func TestProcessResponse_DoNotCompareError(t *testing.T) { outcome := &testdata.TestOutcome{} cmd := &testdata.JsonRpcCommand{} - response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "error": map[string]interface{}{"code": float64(-32000), "message": "err1"}} - expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "error": map[string]interface{}{"code": float64(-32001), "message": "err2"}} + response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "error": map[string]any{"code": float64(-32000), "message": "err1"}} + expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "error": map[string]any{"code": float64(-32001), "message": "err2"}} ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) @@ -153,8 +153,8 @@ func TestProcessResponse_DiffMismatch_JsonDiffGo(t *testing.T) { outcome := &testdata.TestOutcome{} cmd := &testdata.JsonRpcCommand{} - response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} - expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} + response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} ProcessResponse(response, nil, expected, cfg, cmd, dir, daemonFile, expRspFile, diffFile, outcome) @@ -178,8 +178,8 @@ func TestProcessResponse_DiffMismatch_SingleTest_HasColoredDiff(t *testing.T) { outcome := &testdata.TestOutcome{} cmd := &testdata.JsonRpcCommand{} - response := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} - expected := map[string]interface{}{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} + response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} + expected := map[string]any{"jsonrpc": "2.0", 
"id": float64(1), "result": "0x2"} ProcessResponse(response, nil, expected, cfg, cmd, dir, daemonFile, expRspFile, diffFile, outcome) @@ -194,8 +194,8 @@ func TestDumpJSONs_WritesFiles(t *testing.T) { expRspFile := filepath.Join(dir, "expected.json") metrics := &testdata.TestMetrics{} - response := map[string]interface{}{"result": "0x1"} - expected := map[string]interface{}{"result": "0x2"} + response := map[string]any{"result": "0x1"} + expected := map[string]any{"result": "0x2"} err := dumpJSONs(true, daemonFile, expRspFile, dir, response, expected, metrics) if err != nil { diff --git a/internal/config/config.go b/internal/config/config.go index ee01b293..9936a5da 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -165,8 +165,7 @@ func (c *Config) Validate() error { // Validate transport types if c.TransportType != "" { - types := strings.Split(c.TransportType, ",") - for _, t := range types { + for t := range strings.SplitSeq(c.TransportType, ",") { if !IsValidTransport(t) { return fmt.Errorf("invalid connection type: %s", t) } diff --git a/internal/filter/filter.go b/internal/filter/filter.go index 50a42b6e..c9a16289 100644 --- a/internal/filter/filter.go +++ b/internal/filter/filter.go @@ -1,6 +1,7 @@ package filter import ( + "slices" "strconv" "strings" ) @@ -181,12 +182,7 @@ func CheckTestNameForNumber(testName string, reqTestNumber int) bool { // ShouldCompareMessage checks if the message field should be compared for a given test. func (f *TestFilter) ShouldCompareMessage(testPath string) bool { fullPath := f.cfg.Net + "/" + testPath - for _, pattern := range testsNotComparedMessage { - if pattern == fullPath { - return false - } - } - return true + return !slices.Contains(testsNotComparedMessage, fullPath) } // ShouldCompareError checks if the error field should be compared for a given test. 
@@ -195,10 +191,5 @@ func (f *TestFilter) ShouldCompareError(testPath string) bool { return false } fullPath := f.cfg.Net + "/" + testPath - for _, pattern := range testsNotComparedError { - if pattern == fullPath { - return false - } - } - return true + return !slices.Contains(testsNotComparedError, fullPath) } diff --git a/internal/filter/filter_bench_test.go b/internal/filter/filter_bench_test.go index 7d02b0f8..d08af0fa 100644 --- a/internal/filter/filter_bench_test.go +++ b/internal/filter/filter_bench_test.go @@ -5,7 +5,7 @@ import "testing" func BenchmarkAPIUnderTest_NoFilters(b *testing.B) { f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1}) b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { f.APIUnderTest("eth_call", "eth_call/test_01.json") } } @@ -13,7 +13,7 @@ func BenchmarkAPIUnderTest_NoFilters(b *testing.B) { func BenchmarkAPIUnderTest_WithExactAPI(b *testing.B) { f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1, TestingAPIs: "eth_call"}) b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { f.APIUnderTest("eth_call", "eth_call/test_01.json") } } @@ -21,7 +21,7 @@ func BenchmarkAPIUnderTest_WithExactAPI(b *testing.B) { func BenchmarkAPIUnderTest_WithPattern(b *testing.B) { f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1, TestingAPIsWith: "eth_"}) b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { f.APIUnderTest("eth_call", "eth_call/test_01.json") } } @@ -29,7 +29,7 @@ func BenchmarkAPIUnderTest_WithPattern(b *testing.B) { func BenchmarkAPIUnderTest_WithExclude(b *testing.B) { f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1, ExcludeAPIList: "eth_call,eth_getBalance,debug_traceCall"}) b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { f.APIUnderTest("eth_getLogs", "eth_getLogs/test_01.json") } } @@ -37,7 +37,7 @@ func BenchmarkAPIUnderTest_WithExclude(b *testing.B) { func BenchmarkIsSkipped_DefaultList(b *testing.B) { f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1}) b.ResetTimer() - for i := 
0; i < b.N; i++ { + for b.Loop() { f.IsSkipped("eth_call", "eth_call/test_01.json", 1) } } @@ -45,7 +45,7 @@ func BenchmarkIsSkipped_DefaultList(b *testing.B) { func BenchmarkIsSkipped_LatestBlock(b *testing.B) { f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1, TestsOnLatestBlock: true}) b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { f.IsSkipped("eth_call", "eth_call/test_01.json", 1) } } @@ -53,7 +53,7 @@ func BenchmarkIsSkipped_LatestBlock(b *testing.B) { func BenchmarkVerifyInLatestList(b *testing.B) { f := New(FilterConfig{Net: "mainnet", ReqTestNum: -1}) b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { f.VerifyInLatestList("eth_getBlockByNumber/test_01.json") } } diff --git a/internal/perf/perf_bench_test.go b/internal/perf/perf_bench_test.go index a465e43f..ddf85847 100644 --- a/internal/perf/perf_bench_test.go +++ b/internal/perf/perf_bench_test.go @@ -7,7 +7,7 @@ import ( func BenchmarkParseTestSequence(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { ParseTestSequence(DefaultTestSequence) } } @@ -15,7 +15,7 @@ func BenchmarkParseTestSequence(b *testing.B) { func BenchmarkFormatDuration_Microseconds(b *testing.B) { d := 500 * time.Microsecond b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { FormatDuration(d) } } @@ -23,7 +23,7 @@ func BenchmarkFormatDuration_Microseconds(b *testing.B) { func BenchmarkFormatDuration_Milliseconds(b *testing.B) { d := 150 * time.Millisecond b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { FormatDuration(d) } } @@ -31,21 +31,21 @@ func BenchmarkFormatDuration_Milliseconds(b *testing.B) { func BenchmarkFormatDuration_Seconds(b *testing.B) { d := 2500 * time.Millisecond b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { FormatDuration(d) } } func BenchmarkCountDigits(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { CountDigits(10000) } } func BenchmarkGetCompressionType(b *testing.B) { b.ResetTimer() - for i := 
0; i < b.N; i++ { + for b.Loop() { getCompressionType("test.tar.gz") getCompressionType("test.tar.bz2") getCompressionType("test.tar") diff --git a/internal/perf/perf_test.go b/internal/perf/perf_test.go index d7b4db6c..641eb70b 100644 --- a/internal/perf/perf_test.go +++ b/internal/perf/perf_test.go @@ -221,10 +221,9 @@ func TestGetCompressionType(t *testing.T) { func TestHardware_NonLinux(t *testing.T) { h := &Hardware{} // On macOS (darwin), all Linux-specific methods return "unknown" - if h.Vendor() != "unknown" && h.Vendor() != "" { - // On Linux, this would return actual vendor. On macOS, "unknown". - // Just make sure it doesn't panic. - } + // On Linux, Vendor() returns actual vendor. On macOS, "unknown". + // Just verify it doesn't panic. + _ = h.Vendor() _ = h.NormalizedVendor() _ = h.Product() _ = h.Board() diff --git a/internal/perf/report.go b/internal/perf/report.go index 73fba20e..c20c9cfb 100644 --- a/internal/perf/report.go +++ b/internal/perf/report.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/csv" "encoding/json" + "errors" "fmt" "io" "log" @@ -75,9 +76,9 @@ type JSONTestResult struct { // RepetitionInfo holds information for a single test repetition. type RepetitionInfo struct { - VegetaBinary string `json:"vegetaBinary"` - VegetaReport map[string]interface{} `json:"vegetaReport"` - VegetaReportHdrPlot string `json:"vegetaReportHdrPlot"` + VegetaBinary string `json:"vegetaBinary"` + VegetaReport map[string]any `json:"vegetaReport"` + VegetaReportHdrPlot string `json:"vegetaReportHdrPlot"` } // TestReport manages CSV and JSON report generation. @@ -332,7 +333,7 @@ func (tr *TestReport) writeTestReportToJSON(metrics *PerfMetrics) error { } // generateJSONReport generates a JSON report from a vegeta binary file. 
-func generateJSONReport(binaryFile string) (map[string]interface{}, error) { +func generateJSONReport(binaryFile string) (map[string]any, error) { file, err := os.Open(binaryFile) if err != nil { return nil, err @@ -344,7 +345,7 @@ func generateJSONReport(binaryFile string) (map[string]interface{}, error) { for { var result vegeta.Result if err := dec.Decode(&result); err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { break } return nil, err @@ -353,13 +354,13 @@ func generateJSONReport(binaryFile string) (map[string]interface{}, error) { } metrics.Close() - report := map[string]interface{}{ + report := map[string]any{ "requests": metrics.Requests, "duration": metrics.Duration.Seconds(), "rate": metrics.Rate, "throughput": metrics.Throughput, "success": metrics.Success, - "latencies": map[string]interface{}{ + "latencies": map[string]any{ "min": metrics.Latencies.Min.Seconds(), "mean": metrics.Latencies.Mean.Seconds(), "p50": metrics.Latencies.P50.Seconds(), @@ -388,7 +389,7 @@ func generateHdrPlot(binaryFile string) (string, error) { for { var result vegeta.Result if err := dec.Decode(&result); err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { break } return "", err diff --git a/internal/perf/sequence.go b/internal/perf/sequence.go index 926434b6..05d3a1ae 100644 --- a/internal/perf/sequence.go +++ b/internal/perf/sequence.go @@ -20,8 +20,7 @@ type TestSequence []TestSequenceItem func ParseTestSequence(sequence string) (TestSequence, error) { var items TestSequence - parts := strings.Split(sequence, ",") - for _, part := range parts { + for part := range strings.SplitSeq(sequence, ",") { qpsDur := strings.Split(part, ":") if len(qpsDur) != 2 { return nil, fmt.Errorf("invalid test sequence format: %s", part) diff --git a/internal/perf/vegeta.go b/internal/perf/vegeta.go index 74117881..b5c0c7a3 100644 --- a/internal/perf/vegeta.go +++ b/internal/perf/vegeta.go @@ -208,7 +208,7 @@ func (pt *PerfTest) ExecuteSequence(ctx context.Context, 
sequence TestSequence, } for _, test := range sequence { - for rep := 0; rep < pt.Config.Repetitions; rep++ { + for rep := range pt.Config.Repetitions { if test.QPS > 0 { if err := pt.Execute(ctx, testNumber, rep, tag, test.QPS, test.Duration, resultFormat); err != nil { return err diff --git a/internal/rpc/client_bench_test.go b/internal/rpc/client_bench_test.go index 6929db8d..782001ff 100644 --- a/internal/rpc/client_bench_test.go +++ b/internal/rpc/client_bench_test.go @@ -20,20 +20,20 @@ func BenchmarkCallHTTP(b *testing.B) { ctx := context.Background() b.ResetTimer() - for i := 0; i < b.N; i++ { - var result interface{} + for b.Loop() { + var result any client.Call(ctx, server.URL, request, &result) } } func BenchmarkValidateJsonRpcResponse(b *testing.B) { - response := map[string]interface{}{ + response := map[string]any{ "jsonrpc": "2.0", "id": float64(1), "result": "0x1", } b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { ValidateJsonRpcResponse(response) } } diff --git a/internal/rpc/client_test.go b/internal/rpc/client_test.go index f69e118e..cbabfe96 100644 --- a/internal/rpc/client_test.go +++ b/internal/rpc/client_test.go @@ -37,7 +37,7 @@ func TestCallHTTP_Success(t *testing.T) { } w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(map[string]interface{}{ + json.NewEncoder(w).Encode(map[string]any{ "jsonrpc": "2.0", "id": 1, "result": "0x1", @@ -62,7 +62,7 @@ func TestCallHTTP_Success(t *testing.T) { t.Error("UnmarshallingTime should be > 0") } - respMap, ok := response.(map[string]interface{}) + respMap, ok := response.(map[string]any) if !ok { t.Fatal("response is not a map") } @@ -76,7 +76,7 @@ func TestCallHTTP_JWTHeader(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { gotAuth = r.Header.Get("Authorization") w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(map[string]interface{}{ + 
json.NewEncoder(w).Encode(map[string]any{ "jsonrpc": "2.0", "id": 1, "result": nil, @@ -103,7 +103,7 @@ func TestCallHTTP_Compression(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { gotAcceptEncoding = r.Header.Get("Accept-Encoding") w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(map[string]interface{}{ + json.NewEncoder(w).Encode(map[string]any{ "jsonrpc": "2.0", "id": 1, "result": nil, diff --git a/internal/rpc/http.go b/internal/rpc/http.go index 754b1b2b..56ab261e 100644 --- a/internal/rpc/http.go +++ b/internal/rpc/http.go @@ -187,16 +187,16 @@ func validateJsonRpcResponseObject(obj map[string]any) error { // GetLatestBlockNumber queries eth_blockNumber and returns the result as uint64. func GetLatestBlockNumber(ctx context.Context, client *Client, url string) (uint64, Metrics, error) { type rpcReq struct { - Jsonrpc string `json:"jsonrpc"` - Method string `json:"method"` - Params []interface{} `json:"params"` - Id int `json:"id"` + Jsonrpc string `json:"jsonrpc"` + Method string `json:"method"` + Params []any `json:"params"` + Id int `json:"id"` } reqBytes, _ := jsonAPI.Marshal(rpcReq{ Jsonrpc: "2.0", Method: "eth_blockNumber", - Params: []interface{}{}, + Params: []any{}, Id: 1, }) @@ -206,7 +206,7 @@ func GetLatestBlockNumber(ctx context.Context, client *Client, url string) (uint return 0, metrics, err } - responseMap, ok := response.(map[string]interface{}) + responseMap, ok := response.(map[string]any) if !ok { return 0, metrics, fmt.Errorf("response is not a map: %v", response) } @@ -248,7 +248,7 @@ func GetConsistentLatestBlock(verbose int, server1URL, server2URL string, maxRet client := NewClient("http", "", verbose) var bn1, bn2 uint64 - for i := 0; i < maxRetries; i++ { + for i := range maxRetries { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) var err1, err2 error diff --git a/internal/runner/runner.go b/internal/runner/runner.go 
index 7a96609c..3936ecb6 100644 --- a/internal/runner/runner.go +++ b/internal/runner/runner.go @@ -93,7 +93,7 @@ func Run(ctx context.Context, cancelCtx context.CancelFunc, cfg *config.Config) } // Start workers - for i := 0; i < numWorkers; i++ { + for range numWorkers { wg.Add(1) go func() { defer wg.Done() @@ -161,15 +161,15 @@ func Run(ctx context.Context, cancelCtx context.CancelFunc, cfg *config.Config) }() // Main scheduling loop - testRep := 0 globalTestNumber := 0 availableTestedAPIs := discovery.TotalAPIs scheduledIndex := 0 + testRep := 0 - for testRep = 0; testRep < cfg.LoopNumber; testRep++ { + for testRep = range cfg.LoopNumber { select { case <-ctx.Done(): - break + goto done default: } @@ -181,7 +181,7 @@ func Run(ctx context.Context, cancelCtx context.CancelFunc, cfg *config.Config) for _, transportType := range transportTypes { select { case <-ctx.Done(): - break + goto done default: } diff --git a/internal/runner/runner_bench_test.go b/internal/runner/runner_bench_test.go index c3de53fe..8a1b29f4 100644 --- a/internal/runner/runner_bench_test.go +++ b/internal/runner/runner_bench_test.go @@ -17,7 +17,7 @@ func BenchmarkStats_AddSuccess(b *testing.B) { EqualCount: 1, } b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { s := &Stats{} s.AddSuccess(metrics) } @@ -26,7 +26,7 @@ func BenchmarkStats_AddSuccess(b *testing.B) { func BenchmarkShouldRunTest_NoFilters(b *testing.B) { cfg := config.NewConfig() b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { ShouldRunTest(cfg, "test_01.json", 1) } } @@ -35,14 +35,14 @@ func BenchmarkShouldRunTest_WithTestNumber(b *testing.B) { cfg := config.NewConfig() cfg.ReqTestNum = 5 b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { ShouldRunTest(cfg, "test_05.json", 5) } } func BenchmarkCheckTestNameForNumber(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { checkTestNameForNumber("test_01.json", 1) } } @@ -52,7 +52,7 @@ func BenchmarkIsStartTestReached(b 
*testing.B) { cfg.StartTest = "100" cfg.UpdateDirs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { IsStartTestReached(cfg, 50) } } diff --git a/internal/testdata/types.go b/internal/testdata/types.go index bbfcc55c..8cce9082 100644 --- a/internal/testdata/types.go +++ b/internal/testdata/types.go @@ -52,7 +52,7 @@ type JsonRpcResponseMetadata struct { // JsonRpcTestMetadata holds metadata about the test request/response. type JsonRpcTestMetadata struct { - Request interface{} `json:"request"` + Request any `json:"request"` Response *JsonRpcResponseMetadata `json:"response"` } From 57fe87422bff4e831ee115e44fd8491025f55f88 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 13 Feb 2026 19:18:01 +0100 Subject: [PATCH 61/87] Fix all lint issues in cmd/ and remove cmd/ exclusions from golangci config Co-Authored-By: Claude Opus 4.6 --- .golangci.yml | 11 --- cmd/archive/main.go | 11 ++- cmd/integration/archive/archive.go | 2 +- cmd/integration/archive/archive_test.go | 6 +- cmd/integration/jsondiff/diff.go | 93 +++++++++++-------------- cmd/integration/main.go | 4 +- 6 files changed, 55 insertions(+), 72 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index dd00531f..041f093b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -66,17 +66,6 @@ linters: # Test files: relax errcheck - linters: [errcheck] path: _test\.go - # Legacy cmd/ code - not part of v2 refactor - - path: cmd/archive/ - linters: [errcheck, ineffassign, wastedassign, staticcheck, govet] - - path: cmd/integration/main\.go - linters: [errcheck, errorlint] - - path: cmd/integration/jsondiff/ - linters: [ineffassign, modernize, intrange] - - path: cmd/integration/archive/ - linters: [errorlint, intrange, modernize] - - path: cmd/integration/jsondiff/ - linters: [wastedassign] # bodyclose false positives on websocket dial and raw HTTP handler - path: internal/rpc/websocket\.go linters: [bodyclose] diff --git a/cmd/archive/main.go 
b/cmd/archive/main.go index 89706fa1..aa9f68d6 100644 --- a/cmd/archive/main.go +++ b/cmd/archive/main.go @@ -103,7 +103,7 @@ func addFileToTar(tarWriter *tar.Writer, filePath, baseDir string) error { } // If baseDir is not empty, use the relative path, otherwise use the basename - nameInArchive := filePath + var nameInArchive string if baseDir != "" && strings.HasPrefix(filePath, baseDir) { nameInArchive = filePath[len(baseDir)+1:] } else { @@ -168,16 +168,21 @@ func autodetectCompression(archivePath string, inFile *os.File) (string, error) if err != nil { inFile.Close() inFile, err = os.Open(archivePath) + if err != nil { + return compressionType, err + } _, err = gzip.NewReader(inFile) if err == nil { // gzip is OK, rename compressionType = GzipCompression - err := inFile.Close() - if err != nil { + if err := inFile.Close(); err != nil { return compressionType, err } } else { inFile.Close() inFile, err = os.Open(archivePath) + if err != nil { + return compressionType, err + } _, err = tar.NewReader(bzip2.NewReader(inFile)).Next() inFile.Close() if err == nil { // bzip2 is OK, rename diff --git a/cmd/integration/archive/archive.go b/cmd/integration/archive/archive.go index 4f732c46..270faae5 100644 --- a/cmd/integration/archive/archive.go +++ b/cmd/integration/archive/archive.go @@ -125,7 +125,7 @@ func Extract(archivePath string, sanitizeExtension bool, f func(*tar.Reader) err tarReader := tar.NewReader(reader) header, err := tarReader.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { return fmt.Errorf("archive is empty") } if err != nil { diff --git a/cmd/integration/archive/archive_test.go b/cmd/integration/archive/archive_test.go index 5223c5f0..d9226639 100644 --- a/cmd/integration/archive/archive_test.go +++ b/cmd/integration/archive/archive_test.go @@ -444,7 +444,7 @@ func BenchmarkGetCompressionType(b *testing.B) { } b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { for _, f := range filenames { getCompressionKind(f) } @@ -483,7 +483,7 
@@ func BenchmarkExtract(b *testing.B) { } b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { err := Extract(tmpFile, false, nullTarFunc) if err != nil { b.Fatalf("unexpected error: %v", err) @@ -495,7 +495,7 @@ func TestExtract_LargeJSON(t *testing.T) { // Create a large JSON payload var buf bytes.Buffer buf.WriteString("[") - for i := 0; i < 100_000; i++ { + for i := range 100_000 { if i > 0 { buf.WriteString(",") } diff --git a/cmd/integration/jsondiff/diff.go b/cmd/integration/jsondiff/diff.go index b6f71cdd..4ca3b0bf 100644 --- a/cmd/integration/jsondiff/diff.go +++ b/cmd/integration/jsondiff/diff.go @@ -22,8 +22,8 @@ const ( type Diff struct { Type DiffType Path string - OldValue interface{} - NewValue interface{} + OldValue any + NewValue any } // Options configures the diff behaviour @@ -41,19 +41,19 @@ type Options struct { } // DiffJSON computes the difference between two JSON objects -func DiffJSON(obj1, obj2 interface{}, opts *Options) map[string]interface{} { +func DiffJSON(obj1, obj2 any, opts *Options) map[string]any { if opts == nil { opts = &Options{} } - result := make(map[string]interface{}) + result := make(map[string]any) diff(obj1, obj2, "", result, opts) return result } // DiffString returns a human-readable string representation of differences -func DiffString(obj1, obj2 interface{}, opts *Options) string { +func DiffString(obj1, obj2 any, opts *Options) string { if opts == nil { opts = &Options{} } @@ -80,7 +80,7 @@ func DiffString(obj1, obj2 interface{}, opts *Options) string { } // ColoredString returns a colored diff string (for terminal output) -func ColoredString(obj1, obj2 interface{}, opts *Options) string { +func ColoredString(obj1, obj2 any, opts *Options) string { if opts == nil { opts = &Options{} } @@ -113,22 +113,22 @@ func ColoredString(obj1, obj2 interface{}, opts *Options) string { return sb.String() } -func diff(obj1, obj2 interface{}, path string, result map[string]interface{}, opts *Options) { +func diff(obj1, 
obj2 any, path string, result map[string]any, opts *Options) { // Handle nil cases if obj1 == nil && obj2 == nil { if opts.KeepUnchangedValues { - result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + result[path] = map[string]any{"__old": obj1, "__new": obj2} } return } if obj1 == nil { - result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + result[path] = map[string]any{"__old": obj1, "__new": obj2} return } if obj2 == nil { - result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + result[path] = map[string]any{"__old": obj1, "__new": obj2} return } @@ -137,7 +137,7 @@ func diff(obj1, obj2 interface{}, path string, result map[string]interface{}, op // If types are different, mark as changed if v1.Kind() != v2.Kind() { - result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + result[path] = map[string]any{"__old": obj1, "__new": obj2} return } @@ -148,19 +148,19 @@ func diff(obj1, obj2 interface{}, path string, result map[string]interface{}, op diffArrays(obj1, obj2, path, result, opts) default: if !reflect.DeepEqual(obj1, obj2) { - result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + result[path] = map[string]any{"__old": obj1, "__new": obj2} } else if opts.KeepUnchangedValues { - result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + result[path] = map[string]any{"__old": obj1, "__new": obj2} } } } -func diffMaps(obj1, obj2 interface{}, path string, result map[string]interface{}, opts *Options) { - m1, ok1 := obj1.(map[string]interface{}) - m2, ok2 := obj2.(map[string]interface{}) +func diffMaps(obj1, obj2 any, path string, result map[string]any, opts *Options) { + m1, ok1 := obj1.(map[string]any) + m2, ok2 := obj2.(map[string]any) if !ok1 || !ok2 { - result[path] = map[string]interface{}{"__old": obj1, "__new": obj2} + result[path] = map[string]any{"__old": obj1, "__new": obj2} return } @@ -192,58 +192,50 @@ func diffMaps(obj1, obj2 interface{}, path string, result 
map[string]interface{} } if !exists1 { - result[newPath] = map[string]interface{}{"__new": v2} + result[newPath] = map[string]any{"__new": v2} } else if !exists2 { - result[newPath] = map[string]interface{}{"__old": v1} + result[newPath] = map[string]any{"__old": v1} } else { diff(v1, v2, newPath, result, opts) } } } -func diffArrays(obj1, obj2 interface{}, path string, result map[string]interface{}, opts *Options) { +func diffArrays(obj1, obj2 any, path string, result map[string]any, opts *Options) { v1 := reflect.ValueOf(obj1) v2 := reflect.ValueOf(obj2) // Sort arrays if required - arr1 := obj1 - arr2 := obj2 - if opts.SortArrays { - arr1 = sortArrayIfPrimitive(obj1) - arr2 = sortArrayIfPrimitive(obj2) - v1 = reflect.ValueOf(arr1) - v2 = reflect.ValueOf(arr2) + v1 = reflect.ValueOf(sortArrayIfPrimitive(obj1)) + v2 = reflect.ValueOf(sortArrayIfPrimitive(obj2)) } len1 := v1.Len() len2 := v2.Len() - maxLen := len1 - if len2 > maxLen { - maxLen = len2 - } + maxLen := max(len1, len2) - for i := 0; i < maxLen; i++ { + for i := range maxLen { newPath := fmt.Sprintf("%s[%d]", path, i) if i >= len1 { - result[newPath] = map[string]interface{}{"__new": v2.Index(i).Interface()} + result[newPath] = map[string]any{"__new": v2.Index(i).Interface()} } else if i >= len2 { - result[newPath] = map[string]interface{}{"__old": v1.Index(i).Interface()} + result[newPath] = map[string]any{"__old": v1.Index(i).Interface()} } else { diff(v1.Index(i).Interface(), v2.Index(i).Interface(), newPath, result, opts) } } } -func collectDiffs(obj1, obj2 interface{}, path string) []Diff { +func collectDiffs(obj1, obj2 any, path string) []Diff { var diffs []Diff collectDiffsRec(obj1, obj2, path, &diffs) return diffs } -func collectDiffsRec(obj1, obj2 interface{}, path string, diffs *[]Diff) { +func collectDiffsRec(obj1, obj2 any, path string, diffs *[]Diff) { if obj1 == nil && obj2 == nil { *diffs = append(*diffs, Diff{Type: DiffEqual, Path: path, NewValue: obj2}) return @@ -281,9 +273,9 @@ func 
collectDiffsRec(obj1, obj2 interface{}, path string, diffs *[]Diff) { } } -func collectMapDiffs(obj1, obj2 interface{}, path string, diffs *[]Diff) { - m1, ok1 := obj1.(map[string]interface{}) - m2, ok2 := obj2.(map[string]interface{}) +func collectMapDiffs(obj1, obj2 any, path string, diffs *[]Diff) { + m1, ok1 := obj1.(map[string]any) + m2, ok2 := obj2.(map[string]any) if !ok1 || !ok2 { *diffs = append(*diffs, Diff{Type: DiffUpdate, Path: path, OldValue: obj1, NewValue: obj2}) @@ -323,19 +315,16 @@ func collectMapDiffs(obj1, obj2 interface{}, path string, diffs *[]Diff) { } } -func collectArrayDiffs(obj1, obj2 interface{}, path string, diffs *[]Diff) { +func collectArrayDiffs(obj1, obj2 any, path string, diffs *[]Diff) { v1 := reflect.ValueOf(obj1) v2 := reflect.ValueOf(obj2) len1 := v1.Len() len2 := v2.Len() - maxLen := len1 - if len2 > maxLen { - maxLen = len2 - } + maxLen := max(len1, len2) - for i := 0; i < maxLen; i++ { + for i := range maxLen { newPath := fmt.Sprintf("%s[%d]", path, i) if i >= len1 { @@ -348,7 +337,7 @@ func collectArrayDiffs(obj1, obj2 interface{}, path string, diffs *[]Diff) { } } -func sortArrayIfPrimitive(arr interface{}) interface{} { +func sortArrayIfPrimitive(arr any) any { v := reflect.ValueOf(arr) if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { return arr @@ -365,8 +354,8 @@ func sortArrayIfPrimitive(arr interface{}) interface{} { } // Create a copy and sort it - slice := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { + slice := make([]any, v.Len()) + for i := range v.Len() { slice[i] = v.Index(i).Interface() } @@ -377,7 +366,7 @@ func sortArrayIfPrimitive(arr interface{}) interface{} { return slice } -func isPrimitive(v interface{}) bool { +func isPrimitive(v any) bool { if v == nil { return true } @@ -392,7 +381,7 @@ func isPrimitive(v interface{}) bool { } } -func comparePrimitives(a, b interface{}) int { +func comparePrimitives(a, b any) int { if a == nil && b == nil { return 0 } @@ -453,7 +442,7 @@ 
func comparePrimitives(a, b interface{}) int { } } -func formatValue(v interface{}) string { +func formatValue(v any) string { if v == nil { return "null" } @@ -461,7 +450,7 @@ func formatValue(v interface{}) string { switch val := v.(type) { case string: return fmt.Sprintf(`"%s"`, val) - case map[string]interface{}, []interface{}: + case map[string]any, []any: b, _ := json.Marshal(val) return string(b) default: diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 6c603fb1..c277c6ad 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -159,11 +159,11 @@ func parseFlags(cfg *config.Config) error { if *createJWT != "" { if err := config.GenerateJWTSecret(*createJWT, 64); err != nil { - return fmt.Errorf("failed to create JWT secret: %v", err) + return fmt.Errorf("failed to create JWT secret: %w", err) } secret, err := config.GetJWTSecret(*createJWT) if err != nil { - return fmt.Errorf("failed to read JWT secret: %v", err) + return fmt.Errorf("failed to read JWT secret: %w", err) } cfg.JWTSecret = secret } else if *jwtFile != "" { From 86f798f62d26707f5c04ca249d0ba3ddcd63fb82 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 13 Feb 2026 19:21:08 +0100 Subject: [PATCH 62/87] Move archive and jsondiff packages from cmd/integration/ to internal/ Co-Authored-By: Claude Opus 4.6 --- CLAUDE.md | 20 +++++++++++-------- .../archive/archive.go | 0 .../archive/archive_test.go | 0 internal/compare/comparator.go | 2 +- .../integration => internal}/jsondiff/diff.go | 0 .../jsondiff/diff_test.go | 0 internal/testdata/loader.go | 2 +- 7 files changed, 14 insertions(+), 10 deletions(-) rename {cmd/integration => internal}/archive/archive.go (100%) rename {cmd/integration => internal}/archive/archive_test.go (100%) rename {cmd/integration => internal}/jsondiff/diff.go (100%) rename {cmd/integration => internal}/jsondiff/diff_test.go (100%) diff --git a/CLAUDE.md b/CLAUDE.md index dacfb554..b2d4cb88 100644 --- 
a/CLAUDE.md +++ b/CLAUDE.md @@ -13,8 +13,8 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co go build -o ./build/bin/rpc_int ./cmd/integration/main.go # Run Go unit tests -go test ./cmd/integration/archive/ -go test ./cmd/integration/jsondiff/ +go test ./internal/archive/ +go test ./internal/jsondiff/ # Run Python unit tests pytest @@ -43,12 +43,16 @@ pytest 5. Compares actual response against expected response using JSON diff 6. Reports results with colored output, saves diffs to `{network}/results/` -**Supporting packages:** -- `cmd/integration/archive/` — Extract test fixtures from tar/gzip/bzip2 archives -- `cmd/integration/jsondiff/` — Pure Go JSON diff with colored output -- `cmd/integration/rpc/` — HTTP JSON-RPC client with JWT auth and compression support - -**Active v2 refactor** (branch `canepat/v2`): `integration/cli/` is a restructured version of the test runner using `urfave/cli/v2`, splitting the monolithic main.go into focused modules: `flags.go` (config), `test_runner.go` (orchestration), `test_execution.go` (per-test logic), `test_comparator.go` (response comparison), `test_filter.go` (filtering), `rpc.go` (client), `utils.go`. 
+**Internal packages** under `internal/`: +- `internal/archive/` — Extract test fixtures from tar/gzip/bzip2 archives +- `internal/jsondiff/` — Pure Go JSON diff with colored output +- `internal/rpc/` — HTTP/WebSocket JSON-RPC client with JWT auth and compression support +- `internal/compare/` — Response comparison (exact match, JSON diff, external diff) +- `internal/config/` — Configuration, CLI flag parsing, JWT secret management +- `internal/filter/` — Test filtering (API name, pattern, exclusion, latest block) +- `internal/runner/` — Parallel test orchestration (worker pool, scheduling, stats) +- `internal/testdata/` — Test discovery, fixture loading, types +- `internal/perf/` — Performance test support (Vegeta integration, reporting) **Test fixture format** — each test is a JSON file (or tarball containing JSON): ```json diff --git a/cmd/integration/archive/archive.go b/internal/archive/archive.go similarity index 100% rename from cmd/integration/archive/archive.go rename to internal/archive/archive.go diff --git a/cmd/integration/archive/archive_test.go b/internal/archive/archive_test.go similarity index 100% rename from cmd/integration/archive/archive_test.go rename to internal/archive/archive_test.go diff --git a/internal/compare/comparator.go b/internal/compare/comparator.go index e7cf01ff..9f03f1de 100644 --- a/internal/compare/comparator.go +++ b/internal/compare/comparator.go @@ -16,7 +16,7 @@ import ( "github.com/josephburnett/jd/v2" jsoniter "github.com/json-iterator/go" - "github.com/erigontech/rpc-tests/cmd/integration/jsondiff" + "github.com/erigontech/rpc-tests/internal/jsondiff" "github.com/erigontech/rpc-tests/internal/config" "github.com/erigontech/rpc-tests/internal/testdata" ) diff --git a/cmd/integration/jsondiff/diff.go b/internal/jsondiff/diff.go similarity index 100% rename from cmd/integration/jsondiff/diff.go rename to internal/jsondiff/diff.go diff --git a/cmd/integration/jsondiff/diff_test.go b/internal/jsondiff/diff_test.go 
similarity index 100% rename from cmd/integration/jsondiff/diff_test.go rename to internal/jsondiff/diff_test.go diff --git a/internal/testdata/loader.go b/internal/testdata/loader.go index da5589f5..9b3bf50d 100644 --- a/internal/testdata/loader.go +++ b/internal/testdata/loader.go @@ -11,7 +11,7 @@ import ( jsoniter "github.com/json-iterator/go" - "github.com/erigontech/rpc-tests/cmd/integration/archive" + "github.com/erigontech/rpc-tests/internal/archive" ) var json = jsoniter.ConfigCompatibleWithStandardLibrary From ae31b095b9d668672b483d0cf37af07460089bcf Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 13 Feb 2026 19:36:33 +0100 Subject: [PATCH 63/87] Print test results in scheduling order instead of completion order Buffer out-of-order results and flush consecutively by index so parallel execution still runs concurrently but console output matches Python's deterministic ordering. Co-Authored-By: Claude Opus 4.6 --- internal/runner/runner.go | 68 +++++++++++++++++++++------------- internal/runner/runner_test.go | 65 ++++++++++++++++++++++++++++++++ 2 files changed, 108 insertions(+), 25 deletions(-) diff --git a/internal/runner/runner.go b/internal/runner/runner.go index 3936ecb6..2a78874b 100644 --- a/internal/runner/runner.go +++ b/internal/runner/runner.go @@ -112,7 +112,7 @@ func Run(ctx context.Context, cancelCtx context.CancelFunc, cfg *config.Config) }() } - // Results collector with buffered stdout + // Results collector with buffered stdout — prints in scheduling order var resultsWg sync.WaitGroup resultsWg.Add(1) stats := &Stats{} @@ -120,40 +120,28 @@ func Run(ctx context.Context, cancelCtx context.CancelFunc, cfg *config.Config) defer resultsWg.Done() w := bufio.NewWriterSize(os.Stdout, 64*1024) defer w.Flush() + pending := make(map[int]testdata.TestResult) + nextIndex := 0 for { select { case result, ok := <-resultsChan: if !ok { return } - file := fmt.Sprintf("%-60s", result.Test.Name) - tt := 
fmt.Sprintf("%-15s", result.Test.TransportType) - fmt.Fprintf(w, "%04d. %s::%s ", result.Test.Number, tt, file) - - if result.Outcome.Success { - stats.AddSuccess(result.Outcome.Metrics) - if cfg.VerboseLevel > 0 { - fmt.Fprintln(w, "OK") - } else { - fmt.Fprint(w, "OK\r") - } - } else { - stats.AddFailure() - if result.Outcome.Error != nil { - fmt.Fprintf(w, "failed: %s\n", result.Outcome.Error.Error()) - if errors.Is(result.Outcome.Error, compare.ErrDiffMismatch) && result.Outcome.ColoredDiff != "" { - fmt.Fprint(w, result.Outcome.ColoredDiff) - } - } else { - fmt.Fprintf(w, "failed: no error\n") + pending[result.Test.Index] = result + // Flush all consecutive results starting from nextIndex + for { + r, exists := pending[nextIndex] + if !exists { + break } - if cfg.ExitOnFail { - w.Flush() - cancelCtx() + delete(pending, nextIndex) + nextIndex++ + printResult(w, &r, stats, cfg, cancelCtx) + if cfg.ExitOnFail && stats.FailedTests > 0 { return } } - w.Flush() case <-ctx.Done(): return } @@ -283,3 +271,33 @@ done: } return 0, nil } + +func printResult(w *bufio.Writer, result *testdata.TestResult, stats *Stats, cfg *config.Config, cancelCtx context.CancelFunc) { + file := fmt.Sprintf("%-60s", result.Test.Name) + tt := fmt.Sprintf("%-15s", result.Test.TransportType) + fmt.Fprintf(w, "%04d. 
%s::%s ", result.Test.Number, tt, file) + + if result.Outcome.Success { + stats.AddSuccess(result.Outcome.Metrics) + if cfg.VerboseLevel > 0 { + fmt.Fprintln(w, "OK") + } else { + fmt.Fprint(w, "OK\r") + } + } else { + stats.AddFailure() + if result.Outcome.Error != nil { + fmt.Fprintf(w, "failed: %s\n", result.Outcome.Error.Error()) + if errors.Is(result.Outcome.Error, compare.ErrDiffMismatch) && result.Outcome.ColoredDiff != "" { + fmt.Fprint(w, result.Outcome.ColoredDiff) + } + } else { + fmt.Fprintf(w, "failed: no error\n") + } + if cfg.ExitOnFail { + w.Flush() + cancelCtx() + } + } + w.Flush() +} diff --git a/internal/runner/runner_test.go b/internal/runner/runner_test.go index 498361f1..8d32059e 100644 --- a/internal/runner/runner_test.go +++ b/internal/runner/runner_test.go @@ -1,6 +1,11 @@ package runner import ( + "bufio" + "bytes" + "context" + "fmt" + "strings" "testing" "time" @@ -147,3 +152,63 @@ func TestCheckTestNameForNumber(t *testing.T) { } } } + +func TestPrintResult_OrderedOutput(t *testing.T) { + const numTests = 50 + + // Send results in reverse order to simulate out-of-order parallel completion + resultsChan := make(chan testdata.TestResult, numTests) + for i := numTests - 1; i >= 0; i-- { + resultsChan <- testdata.TestResult{ + Outcome: testdata.TestOutcome{Success: true}, + Test: &testdata.TestDescriptor{ + Name: "eth_call/test_01.json", + Number: i + 1, + TransportType: "http", + Index: i, + }, + } + } + close(resultsChan) + + // Run the collector logic + cfg := config.NewConfig() + cfg.VerboseLevel = 1 + _, cancel := context.WithCancel(context.Background()) + defer cancel() + + var buf bytes.Buffer + w := bufio.NewWriter(&buf) + stats := &Stats{} + pending := make(map[int]testdata.TestResult) + nextIndex := 0 + + for result := range resultsChan { + pending[result.Test.Index] = result + for { + r, exists := pending[nextIndex] + if !exists { + break + } + delete(pending, nextIndex) + nextIndex++ + printResult(w, &r, stats, cfg, cancel) + } 
+ } + w.Flush() + + // Verify output is in order + lines := strings.Split(strings.TrimSpace(buf.String()), "\n") + if len(lines) != numTests { + t.Fatalf("expected %d lines, got %d", numTests, len(lines)) + } + for i, line := range lines { + expectedPrefix := fmt.Sprintf("%04d.", i+1) + if !strings.HasPrefix(line, expectedPrefix) { + t.Errorf("line %d: expected prefix %q, got %q", i, expectedPrefix, line[:min(len(line), 10)]) + } + } + if stats.SuccessTests != numTests { + t.Errorf("SuccessTests: got %d, want %d", stats.SuccessTests, numTests) + } +} From 353a1caf4f041e5bfb4c55bed03568fba5122017 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Fri, 13 Feb 2026 22:11:41 +0100 Subject: [PATCH 64/87] Use dsnet/compress/bzip2 (CGo) instead of stdlib pure-Go bzip2 Profile showed compress/bzip2 consuming 32.8s CPU (30.7%) on pure-Go decompression of 221 test archives. The dsnet library wraps libbzip2 via CGo and is already a project dependency. CPU user time drops from ~90s to ~73s. 
Co-Authored-By: Claude Opus 4.6 --- internal/archive/archive.go | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/internal/archive/archive.go b/internal/archive/archive.go index 270faae5..90fcbc50 100644 --- a/internal/archive/archive.go +++ b/internal/archive/archive.go @@ -2,13 +2,14 @@ package archive import ( "archive/tar" - "compress/bzip2" "compress/gzip" "errors" "fmt" "io" "os" "strings" + + "github.com/dsnet/compress/bzip2" ) // Compression defines the supported compression types @@ -59,7 +60,13 @@ func autodetectCompression(inFile *os.File) (Compression, error) { if err != nil { return compressionType, err } - _, err = tar.NewReader(bzip2.NewReader(inFile)).Next() + bzReader, bzErr := bzip2.NewReader(inFile, nil) + if bzErr == nil { + _, err = tar.NewReader(bzReader).Next() + bzReader.Close() + } else { + err = bzErr + } if err == nil { compressionType = Bzip2Compression } @@ -118,7 +125,12 @@ func Extract(archivePath string, sanitizeExtension bool, f func(*tar.Reader) err }(gzReader) reader = gzReader case Bzip2Compression: - reader = bzip2.NewReader(inputFile) + bzReader, bzErr := bzip2.NewReader(inputFile, nil) + if bzErr != nil { + return fmt.Errorf("failed to create bzip2 reader: %w", bzErr) + } + defer bzReader.Close() + reader = bzReader case NoCompression: reader = inputFile } From 46a0dafa9a7693feee988e532cd45f3a21d19807 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Sat, 14 Feb 2026 09:20:31 +0100 Subject: [PATCH 65/87] Port 9 Python scripts to Go subcommands of rpc_int Add subcommand dispatch to main() that delegates to urfave/cli when os.Args[1] matches a known subcommand, preserving full backward compatibility with existing flag-based behavior. New subcommands: block-by-number, empty-blocks, filter-changes, latest-block-logs, subscriptions, graphql, replay-request, replay-tx, scan-block-receipts. 
New packages: internal/tools (subcommand registry + implementations), internal/rpc/wsconn.go (persistent WebSocket connection), internal/eth (pure-Go RLP encoding + MPT trie for receipt root verification). Co-Authored-By: Claude Opus 4.6 --- .claude/skills/erigon-run/SKILL.md | 76 +++ cmd/integration/main.go | 13 + go.mod | 12 +- go.sum | 16 +- internal/eth/receipt.go | 692 ++++++++++++++++++++++++++ internal/eth/receipt_test.go | 201 ++++++++ internal/rpc/wsconn.go | 66 +++ internal/tools/block_by_number.go | 110 ++++ internal/tools/empty_blocks.go | 201 ++++++++ internal/tools/filter_changes.go | 117 +++++ internal/tools/graphql.go | 320 ++++++++++++ internal/tools/latest_block_logs.go | 191 +++++++ internal/tools/replay_request.go | 221 ++++++++ internal/tools/replay_tx.go | 192 +++++++ internal/tools/scan_block_receipts.go | 356 +++++++++++++ internal/tools/subscriptions.go | 145 ++++++ internal/tools/tools.go | 35 ++ internal/tools/tools_test.go | 49 ++ 18 files changed, 3003 insertions(+), 10 deletions(-) create mode 100644 .claude/skills/erigon-run/SKILL.md create mode 100644 internal/eth/receipt.go create mode 100644 internal/eth/receipt_test.go create mode 100644 internal/rpc/wsconn.go create mode 100644 internal/tools/block_by_number.go create mode 100644 internal/tools/empty_blocks.go create mode 100644 internal/tools/filter_changes.go create mode 100644 internal/tools/graphql.go create mode 100644 internal/tools/latest_block_logs.go create mode 100644 internal/tools/replay_request.go create mode 100644 internal/tools/replay_tx.go create mode 100644 internal/tools/scan_block_receipts.go create mode 100644 internal/tools/subscriptions.go create mode 100644 internal/tools/tools.go create mode 100644 internal/tools/tools_test.go diff --git a/.claude/skills/erigon-run/SKILL.md b/.claude/skills/erigon-run/SKILL.md new file mode 100644 index 00000000..35e61632 --- /dev/null +++ b/.claude/skills/erigon-run/SKILL.md @@ -0,0 +1,76 @@ +--- +name: erigon-run 
+description: Use to run Erigon on an existing datadir. Use when the user wants to exercise the `rpc-tests` binaries (`rpc_int`, `rpc_perf`) against real server. +allowed-tools: Bash, Read, Glob +--- + +# Erigon Run + +## Overview +The `erigon` command runs Erigon on an existing Erigon datadir. + +## Command Syntax + +```bash +cd && ./build/bin/erigon --datadir= --http.api admin,debug,eth,parity,erigon,trace,web3,txpool,ots,net --ws [other-flags] +``` + +## Required Flags + +- `--datadir`: Path to the Erigon datadir (required) + +## Usage Patterns + +### Change HTTP port +```bash +cd && ./build/bin/erigon --datadir= --http.port=8546 +``` + +### WebSocket support +```bash +cd && ./build/bin/erigon --datadir= --ws +``` + +## Important Considerations + +### Before Running +1. **Ask for Erigon home**: Ask the user which Erigon home folder to use if not already provided +2. **Stop Erigon and RpcDaemon**: Ensure Erigon and/or RpcDaemon are not running on the target datadir +3. **Ensure Erigon binary is built**: run `make erigon` to build it + +### After Running +1. Wait until the HTTP port (value provided with --http.port or default 8545) is reachable + + +## Workflow + +When the user wants to run Erigon: + +1. **Confirm parameters** + - Ask for Erigon home path to use if not provided or know in context + - Ask for target datadir path + +2. 
**Safety checks** + - Verify Erigon home exists + - Verify datadir exists + - Check if Erigon and/or RpcDaemon are running (should not be) + + +## Error Handling + +Common issues: +- **"datadir not found"**: Verify the path is correct +- **"database locked"**: Stop Erigon process first + + +## Examples + +### Example 1: All API namespaces and WebSocket enabled +```bash +cd ../erigon_devel && ./build/bin/erigon --datadir=~/Library/erigon-eth-mainnet --http.api admin,debug,eth,parity,erigon,trace,web3,txpool,ots,net --ws +``` + + +## Tips + +- If building from source, use `make erigon` within to build the binary at `build/bin/erigon` diff --git a/cmd/integration/main.go b/cmd/integration/main.go index c277c6ad..9c876ab9 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -13,6 +13,8 @@ import ( "github.com/erigontech/rpc-tests/internal/config" "github.com/erigontech/rpc-tests/internal/runner" + "github.com/erigontech/rpc-tests/internal/tools" + "github.com/urfave/cli/v2" ) func parseFlags(cfg *config.Config) error { @@ -310,5 +312,16 @@ func runMain() int { } func main() { + if len(os.Args) > 1 && tools.IsSubcommand(os.Args[1]) { + app := &cli.App{ + Name: "rpc_int", + Commands: tools.Commands(), + } + if err := app.Run(os.Args); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + return + } os.Exit(runMain()) } diff --git a/go.mod b/go.mod index dadda692..672be7f8 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/erigontech/rpc-tests -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require ( github.com/dsnet/compress v0.0.1 @@ -10,6 +12,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/tsenart/vegeta/v12 v12.13.0 github.com/urfave/cli/v2 v2.27.7 + golang.org/x/crypto v0.48.0 ) require ( @@ -26,8 +29,9 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect - 
golang.org/x/net v0.43.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/text v0.28.0 // indirect + golang.org/x/net v0.49.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.41.0 // indirect + golang.org/x/text v0.34.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index fb47df3b..432aaa6a 100644 --- a/go.sum +++ b/go.sum @@ -63,16 +63,20 @@ github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU= github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= +golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= +golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod 
h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= +golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca h1:PupagGYwj8+I4ubCxcmcBRk3VlUWtTg5huQpZR9flmE= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= diff --git a/internal/eth/receipt.go b/internal/eth/receipt.go new file mode 100644 index 00000000..ee73203e --- /dev/null +++ b/internal/eth/receipt.go @@ -0,0 +1,692 @@ +package eth + +import ( + "encoding/hex" + "fmt" + "math/big" + "strings" + + "golang.org/x/crypto/sha3" +) + +// ComputeReceiptsRoot computes the MPT root hash from a list of receipt maps +// returned by eth_getBlockReceipts. 
+func ComputeReceiptsRoot(receipts []map[string]any) (string, error) { + if len(receipts) == 0 { + // Empty trie root = Keccak256(RLP("")) + return "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", nil + } + + trie := newMPT() + + for i, receipt := range receipts { + encoded, err := encodeReceipt(receipt) + if err != nil { + return "", fmt.Errorf("encode receipt %d: %w", i, err) + } + + key := rlpEncodeUint(uint64(i)) + trie.put(key, encoded) + } + + root := trie.rootHash() + return "0x" + hex.EncodeToString(root), nil +} + +func encodeReceipt(receipt map[string]any) ([]byte, error) { + // Extract fields + statusVal := receipt["status"] + cumulativeGasUsed := receipt["cumulativeGasUsed"] + logsBloom := receipt["logsBloom"] + logs := receipt["logs"] + receiptType := receipt["type"] + + // Build logs list + logsArr, _ := logs.([]any) + var encodedLogs [][]byte + for _, l := range logsArr { + logMap, ok := l.(map[string]any) + if !ok { + continue + } + encodedLog, err := encodeLog(logMap) + if err != nil { + return nil, err + } + encodedLogs = append(encodedLogs, encodedLog) + } + + // Build receipt RLP list + var items [][]byte + + // Status or root + if statusVal != nil { + statusHex, _ := statusVal.(string) + statusInt := hexToUint64(statusHex) + items = append(items, rlpEncodeUint(statusInt)) + } else if root, ok := receipt["root"].(string); ok { + rootBytes := hexToBytes(root) + items = append(items, rlpEncodeBytes(rootBytes)) + } else { + return nil, fmt.Errorf("receipt has neither 'status' nor 'root' field") + } + + // cumulativeGasUsed + gasHex, _ := cumulativeGasUsed.(string) + gasVal := hexToUint64(gasHex) + items = append(items, rlpEncodeUint(gasVal)) + + // logsBloom + bloomHex, _ := logsBloom.(string) + bloomBytes := hexToBytes(bloomHex) + items = append(items, rlpEncodeBytes(bloomBytes)) + + // logs + items = append(items, rlpEncodeList(encodedLogs)) + + value := rlpEncodeListFromRLP(items) + + // Receipt type: non-legacy types are 
prefixed with the type byte + typeHex, _ := receiptType.(string) + typeVal := hexToUint64(typeHex) + if typeVal != 0 { + value = append([]byte{byte(typeVal)}, value...) + } + + return value, nil +} + +func encodeLog(logMap map[string]any) ([]byte, error) { + address, _ := logMap["address"].(string) + topicsRaw, _ := logMap["topics"].([]any) + data, _ := logMap["data"].(string) + + var items [][]byte + + // address + items = append(items, rlpEncodeBytes(hexToBytes(address))) + + // topics + var topicItems [][]byte + for _, t := range topicsRaw { + topicStr, _ := t.(string) + topicItems = append(topicItems, rlpEncodeBytes(hexToBytes(topicStr))) + } + items = append(items, rlpEncodeListFromRLP(topicItems)) + + // data + items = append(items, rlpEncodeBytes(hexToBytes(data))) + + return rlpEncodeListFromRLP(items), nil +} + +// --- RLP encoding --- + +func rlpEncodeUint(val uint64) []byte { + if val == 0 { + return []byte{0x80} + } + if val < 128 { + return []byte{byte(val)} + } + b := big.NewInt(0).SetUint64(val).Bytes() + return rlpEncodeBytes(b) +} + +func rlpEncodeBytes(b []byte) []byte { + if len(b) == 1 && b[0] < 128 { + return b + } + if len(b) <= 55 { + return append([]byte{byte(0x80 + len(b))}, b...) + } + lenBytes := encodeLength(len(b)) + prefix := append([]byte{byte(0xb7 + len(lenBytes))}, lenBytes...) + return append(prefix, b...) +} + +func rlpEncodeList(items [][]byte) []byte { + return rlpEncodeListFromRLP(encodeItemsToRLP(items)) +} + +func encodeItemsToRLP(items [][]byte) [][]byte { + var rlpItems [][]byte + for _, item := range items { + rlpItems = append(rlpItems, rlpEncodeBytes(item)) + } + return rlpItems +} + +func rlpEncodeListFromRLP(rlpItems [][]byte) []byte { + var payload []byte + for _, item := range rlpItems { + payload = append(payload, item...) + } + if len(payload) <= 55 { + return append([]byte{byte(0xc0 + len(payload))}, payload...) 
+ } + lenBytes := encodeLength(len(payload)) + prefix := append([]byte{byte(0xf7 + len(lenBytes))}, lenBytes...) + return append(prefix, payload...) +} + +func encodeLength(n int) []byte { + if n == 0 { + return []byte{0} + } + b := big.NewInt(int64(n)).Bytes() + return b +} + +// --- Hex utilities --- + +func hexToBytes(s string) []byte { + s = strings.TrimPrefix(s, "0x") + if len(s)%2 != 0 { + s = "0" + s + } + b, _ := hex.DecodeString(s) + return b +} + +func hexToUint64(s string) uint64 { + s = strings.TrimPrefix(s, "0x") + if s == "" { + return 0 + } + var result uint64 + for _, c := range s { + result <<= 4 + switch { + case c >= '0' && c <= '9': + result |= uint64(c - '0') + case c >= 'a' && c <= 'f': + result |= uint64(c - 'a' + 10) + case c >= 'A' && c <= 'F': + result |= uint64(c - 'A' + 10) + } + } + return result +} + +// --- MPT (Modified Merkle-Patricia Trie) --- + +func keccak256(data []byte) []byte { + h := sha3.NewLegacyKeccak256() + h.Write(data) + return h.Sum(nil) +} + +// mpt is a simple implementation of Ethereum's Modified Merkle-Patricia Trie +// sufficient for computing root hashes of receipt tries. 
+type mpt struct { + db map[string][]byte + root []byte +} + +func newMPT() *mpt { + return &mpt{ + db: make(map[string][]byte), + } +} + +func (t *mpt) put(key, value []byte) { + nibbles := bytesToNibbles(key) + t.root = t.insert(t.root, nibbles, value) +} + +func (t *mpt) rootHash() []byte { + if t.root == nil { + return keccak256([]byte{0x80}) + } + if len(t.root) < 32 { + return keccak256(t.root) + } + return t.root +} + +func (t *mpt) insert(node []byte, nibbles []byte, value []byte) []byte { + if node == nil { + // Create a leaf node + return t.hashNode(encodeLeaf(nibbles, value)) + } + + // Decode the existing node + existing := t.resolveNode(node) + if existing == nil { + return t.hashNode(encodeLeaf(nibbles, value)) + } + + nodeType, decoded := decodeNode(existing) + + switch nodeType { + case nodeTypeLeaf: + existingNibbles := decoded[0] + existingValue := decoded[1] + + // Find common prefix + commonLen := commonPrefixLen(nibbles, existingNibbles) + + if commonLen == len(nibbles) && commonLen == len(existingNibbles) { + // Same key, update value + return t.hashNode(encodeLeaf(nibbles, value)) + } + + // Create a branch node + branch := make([][]byte, 17) + for i := range 17 { + branch[i] = nil + } + + if commonLen == len(existingNibbles) { + branch[16] = existingValue + } else { + branch[existingNibbles[commonLen]] = t.hashNode(encodeLeaf(existingNibbles[commonLen+1:], existingValue)) + } + + if commonLen == len(nibbles) { + branch[16] = value + } else { + branch[nibbles[commonLen]] = t.hashNode(encodeLeaf(nibbles[commonLen+1:], value)) + } + + branchNode := t.hashNode(encodeBranch(branch)) + + if commonLen > 0 { + return t.hashNode(encodeExtension(nibbles[:commonLen], branchNode)) + } + return branchNode + + case nodeTypeExtension: + extNibbles := decoded[0] + childRef := decoded[1] + + commonLen := commonPrefixLen(nibbles, extNibbles) + + if commonLen == len(extNibbles) { + // Key starts with extension prefix, insert into child + newChild := 
t.insert(childRef, nibbles[commonLen:], value) + return t.hashNode(encodeExtension(extNibbles, newChild)) + } + + // Split the extension + branch := make([][]byte, 17) + for i := range 17 { + branch[i] = nil + } + + if commonLen+1 == len(extNibbles) { + branch[extNibbles[commonLen]] = childRef + } else { + branch[extNibbles[commonLen]] = t.hashNode(encodeExtension(extNibbles[commonLen+1:], childRef)) + } + + if commonLen == len(nibbles) { + branch[16] = value + } else { + branch[nibbles[commonLen]] = t.hashNode(encodeLeaf(nibbles[commonLen+1:], value)) + } + + branchNode := t.hashNode(encodeBranch(branch)) + + if commonLen > 0 { + return t.hashNode(encodeExtension(nibbles[:commonLen], branchNode)) + } + return branchNode + + case nodeTypeBranch: + if len(nibbles) == 0 { + existing := t.resolveNode(node) + _, branchData := decodeNode(existing) + branch := decodeBranchRefs(branchData, existing) + branch[16] = value + return t.hashNode(encodeBranch(branch)) + } + + existing := t.resolveNode(node) + _, branchData := decodeNode(existing) + branch := decodeBranchRefs(branchData, existing) + + idx := nibbles[0] + branch[idx] = t.insert(branch[idx], nibbles[1:], value) + return t.hashNode(encodeBranch(branch)) + } + + return t.hashNode(encodeLeaf(nibbles, value)) +} + +func (t *mpt) hashNode(encoded []byte) []byte { + if len(encoded) < 32 { + return encoded + } + hash := keccak256(encoded) + t.db[string(hash)] = encoded + return hash +} + +func (t *mpt) resolveNode(ref []byte) []byte { + if len(ref) == 32 { + if data, ok := t.db[string(ref)]; ok { + return data + } + return nil + } + return ref +} + +// --- Node types --- + +const ( + nodeTypeLeaf = 0 + nodeTypeExtension = 1 + nodeTypeBranch = 2 +) + +func decodeNode(data []byte) (int, [][]byte) { + items := rlpDecodeList(data) + if len(items) == 17 { + return nodeTypeBranch, items + } + if len(items) == 2 { + prefix := items[0] + nibbles := compactToNibbles(prefix) + if len(nibbles) > 0 && (nibbles[0]&0x02) != 0 { + // 
Leaf (flag bit 1 set) + return nodeTypeLeaf, [][]byte{nibbles[1:], items[1]} + } + // Extension + return nodeTypeExtension, [][]byte{nibbles[1:], items[1]} + } + return -1, nil +} + +func decodeBranchRefs(_ [][]byte, rawNode []byte) [][]byte { + branch := make([][]byte, 17) + // Re-decode to get the raw RLP items including embedded nodes + rawItems := rlpDecodeListRaw(rawNode) + for i := range min(17, len(rawItems)) { + if len(rawItems[i]) == 0 || (len(rawItems[i]) == 1 && rawItems[i][0] == 0x80) { + branch[i] = nil + } else { + branch[i] = rawItems[i] + } + } + return branch +} + +// --- Compact (hex-prefix) encoding --- + +func bytesToNibbles(data []byte) []byte { + nibbles := make([]byte, len(data)*2) + for i, b := range data { + nibbles[i*2] = b >> 4 + nibbles[i*2+1] = b & 0x0f + } + return nibbles +} + +func nibblesToCompact(nibbles []byte, isLeaf bool) []byte { + flag := byte(0) + if isLeaf { + flag = 2 + } + + var compact []byte + if len(nibbles)%2 == 1 { + // Odd length: first nibble goes into first byte with flag + compact = append(compact, (flag+1)<<4|nibbles[0]) + nibbles = nibbles[1:] + } else { + compact = append(compact, flag<<4) + } + + for i := 0; i < len(nibbles); i += 2 { + compact = append(compact, nibbles[i]<<4|nibbles[i+1]) + } + return compact +} + +func compactToNibbles(compact []byte) []byte { + if len(compact) == 0 { + return nil + } + + flag := compact[0] >> 4 + var nibbles []byte + + // First nibble is the flag itself + nibbles = append(nibbles, flag) + + if flag&0x01 == 1 { + // Odd: lower nibble of first byte is data + nibbles = append(nibbles, compact[0]&0x0f) + } + + for _, b := range compact[1:] { + nibbles = append(nibbles, b>>4, b&0x0f) + } + return nibbles +} + +func encodeLeaf(nibbles, value []byte) []byte { + key := nibblesToCompact(nibbles, true) + items := [][]byte{ + rlpEncodeBytes(key), + rlpEncodeBytes(value), + } + return rlpEncodeListFromRLP(items) +} + +func encodeExtension(nibbles, childRef []byte) []byte { + key := 
nibblesToCompact(nibbles, false) + var childRLP []byte + if len(childRef) == 32 { + childRLP = rlpEncodeBytes(childRef) + } else { + childRLP = childRef // Already RLP encoded + } + items := [][]byte{ + rlpEncodeBytes(key), + childRLP, + } + return rlpEncodeListFromRLP(items) +} + +func encodeBranch(children [][]byte) []byte { + var items [][]byte + for i := range 16 { + if children[i] == nil { + items = append(items, []byte{0x80}) // RLP empty string + } else if len(children[i]) == 32 { + items = append(items, rlpEncodeBytes(children[i])) + } else { + items = append(items, children[i]) // Inline node + } + } + // Value slot (index 16) + if children[16] == nil { + items = append(items, []byte{0x80}) + } else { + items = append(items, rlpEncodeBytes(children[16])) + } + return rlpEncodeListFromRLP(items) +} + +func commonPrefixLen(a, b []byte) int { + maxLen := min(len(a), len(b)) + for i := range maxLen { + if a[i] != b[i] { + return i + } + } + return maxLen +} + +// --- RLP decoding --- + +func rlpDecodeList(data []byte) [][]byte { + if len(data) == 0 { + return nil + } + + _, payload := rlpDecodeListPayload(data) + if payload == nil { + return nil + } + + var items [][]byte + offset := 0 + for offset < len(payload) { + item, consumed := rlpDecodeItem(payload[offset:]) + items = append(items, item) + offset += consumed + } + return items +} + +func rlpDecodeListRaw(data []byte) [][]byte { + if len(data) == 0 { + return nil + } + + _, payload := rlpDecodeListPayload(data) + if payload == nil { + return nil + } + + var items [][]byte + offset := 0 + for offset < len(payload) { + raw, consumed := rlpDecodeItemRaw(payload[offset:]) + items = append(items, raw) + offset += consumed + } + return items +} + +func rlpDecodeListPayload(data []byte) (headerLen int, payload []byte) { + if len(data) == 0 { + return 0, nil + } + prefix := data[0] + if prefix >= 0xc0 && prefix <= 0xf7 { + length := int(prefix - 0xc0) + if 1+length > len(data) { + return 0, nil + } + return 1, 
data[1 : 1+length] + } + if prefix > 0xf7 { + lenOfLen := int(prefix - 0xf7) + if 1+lenOfLen > len(data) { + return 0, nil + } + length := decodeUintBE(data[1 : 1+lenOfLen]) + headerLen = 1 + lenOfLen + if headerLen+length > len(data) { + return 0, nil + } + return headerLen, data[headerLen : headerLen+length] + } + return 0, nil +} + +func rlpDecodeItem(data []byte) (value []byte, consumed int) { + if len(data) == 0 { + return nil, 0 + } + prefix := data[0] + + // Single byte + if prefix < 0x80 { + return data[:1], 1 + } + + // Short string (0-55 bytes) + if prefix <= 0xb7 { + length := int(prefix - 0x80) + consumed = 1 + length + if consumed > len(data) { + return nil, consumed + } + return data[1:consumed], consumed + } + + // Long string + if prefix <= 0xbf { + lenOfLen := int(prefix - 0xb7) + length := decodeUintBE(data[1 : 1+lenOfLen]) + consumed = 1 + lenOfLen + length + if consumed > len(data) { + return nil, consumed + } + return data[1+lenOfLen : consumed], consumed + } + + // Short list (0-55 bytes) + if prefix <= 0xf7 { + length := int(prefix - 0xc0) + consumed = 1 + length + return data[1:consumed], consumed + } + + // Long list + lenOfLen := int(prefix - 0xf7) + length := decodeUintBE(data[1 : 1+lenOfLen]) + consumed = 1 + lenOfLen + length + if consumed > len(data) { + return nil, consumed + } + return data[1+lenOfLen : consumed], consumed +} + +func rlpDecodeItemRaw(data []byte) (raw []byte, consumed int) { + if len(data) == 0 { + return nil, 0 + } + prefix := data[0] + + if prefix < 0x80 { + return data[:1], 1 + } + if prefix <= 0xb7 { + length := int(prefix - 0x80) + consumed = 1 + length + if consumed > len(data) { + return nil, consumed + } + return data[1:consumed], consumed + } + if prefix <= 0xbf { + lenOfLen := int(prefix - 0xb7) + length := decodeUintBE(data[1 : 1+lenOfLen]) + consumed = 1 + lenOfLen + length + if consumed > len(data) { + return nil, consumed + } + return data[1+lenOfLen : consumed], consumed + } + if prefix <= 0xf7 { + 
length := int(prefix - 0xc0) + consumed = 1 + length + if consumed > len(data) { + return nil, consumed + } + return data[:consumed], consumed + } + lenOfLen := int(prefix - 0xf7) + length := decodeUintBE(data[1 : 1+lenOfLen]) + consumed = 1 + lenOfLen + length + if consumed > len(data) { + return nil, consumed + } + return data[:consumed], consumed +} + +func decodeUintBE(data []byte) int { + result := 0 + for _, b := range data { + result = result<<8 | int(b) + } + return result +} diff --git a/internal/eth/receipt_test.go b/internal/eth/receipt_test.go new file mode 100644 index 00000000..5c3806f6 --- /dev/null +++ b/internal/eth/receipt_test.go @@ -0,0 +1,201 @@ +package eth + +import ( + "encoding/hex" + "testing" +) + +func TestKeccak256(t *testing.T) { + // Keccak256 of empty string + result := keccak256([]byte{}) + expected := "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + got := hex.EncodeToString(result) + if got != expected { + t.Errorf("keccak256 empty: got %s, want %s", got, expected) + } +} + +func TestRlpEncodeUint(t *testing.T) { + tests := []struct { + val uint64 + want string + }{ + {0, "80"}, + {1, "01"}, + {127, "7f"}, + {128, "8180"}, + {256, "820100"}, + {1024, "820400"}, + } + for _, tt := range tests { + got := hex.EncodeToString(rlpEncodeUint(tt.val)) + if got != tt.want { + t.Errorf("rlpEncodeUint(%d): got %s, want %s", tt.val, got, tt.want) + } + } +} + +func TestRlpEncodeBytes(t *testing.T) { + tests := []struct { + name string + val []byte + want string + }{ + {"empty", []byte{}, "80"}, + {"single byte < 128", []byte{0x42}, "42"}, + {"single byte 128", []byte{0x80}, "8180"}, + {"short string", []byte("dog"), "83646f67"}, + } + for _, tt := range tests { + got := hex.EncodeToString(rlpEncodeBytes(tt.val)) + if got != tt.want { + t.Errorf("rlpEncodeBytes(%s): got %s, want %s", tt.name, got, tt.want) + } + } +} + +func TestRlpEncodeListFromRLP(t *testing.T) { + // RLP of ["cat", "dog"] + cat := 
rlpEncodeBytes([]byte("cat")) + dog := rlpEncodeBytes([]byte("dog")) + got := hex.EncodeToString(rlpEncodeListFromRLP([][]byte{cat, dog})) + want := "c88363617483646f67" + if got != want { + t.Errorf("rlpEncodeListFromRLP: got %s, want %s", got, want) + } +} + +func TestHexToBytes(t *testing.T) { + tests := []struct { + input string + want string + }{ + {"0x1234", "1234"}, + {"0xabcd", "abcd"}, + {"1234", "1234"}, + {"0x0", "00"}, + } + for _, tt := range tests { + got := hex.EncodeToString(hexToBytes(tt.input)) + if got != tt.want { + t.Errorf("hexToBytes(%s): got %s, want %s", tt.input, got, tt.want) + } + } +} + +func TestHexToUint64(t *testing.T) { + tests := []struct { + input string + want uint64 + }{ + {"0x0", 0}, + {"0x1", 1}, + {"0xa", 10}, + {"0xff", 255}, + {"0x100", 256}, + } + for _, tt := range tests { + got := hexToUint64(tt.input) + if got != tt.want { + t.Errorf("hexToUint64(%s): got %d, want %d", tt.input, got, tt.want) + } + } +} + +func TestComputeReceiptsRootEmpty(t *testing.T) { + root, err := ComputeReceiptsRoot(nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + want := "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + if root != want { + t.Errorf("empty receipts root: got %s, want %s", root, want) + } +} + +func TestBytesToNibbles(t *testing.T) { + got := bytesToNibbles([]byte{0xab, 0xcd}) + want := []byte{0xa, 0xb, 0xc, 0xd} + if len(got) != len(want) { + t.Fatalf("bytesToNibbles length: got %d, want %d", len(got), len(want)) + } + for i := range got { + if got[i] != want[i] { + t.Errorf("bytesToNibbles[%d]: got %d, want %d", i, got[i], want[i]) + } + } +} + +func TestNibblesToCompactLeaf(t *testing.T) { + // Leaf with even nibbles [1, 2, 3, 4] + compact := nibblesToCompact([]byte{1, 2, 3, 4}, true) + got := hex.EncodeToString(compact) + want := "2012" + "34" + if got != want { + t.Errorf("nibblesToCompact(even leaf): got %s, want %s", got, want) + } + + // Leaf with odd nibbles [1, 2, 3] + compact = 
nibblesToCompact([]byte{1, 2, 3}, true) + got = hex.EncodeToString(compact) + want = "31" + "23" + if got != want { + t.Errorf("nibblesToCompact(odd leaf): got %s, want %s", got, want) + } +} + +func TestComputeReceiptsRootSingleLegacyReceipt(t *testing.T) { + // Single legacy receipt (type 0) with status 1, minimal data + receipt := map[string]any{ + "status": "0x1", + "cumulativeGasUsed": "0x5208", + "logsBloom": "0x" + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": []any{}, + "type": "0x0", + } + root, err := ComputeReceiptsRoot([]map[string]any{receipt}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // The root should be a valid hex hash + if len(root) != 66 { // "0x" + 64 hex chars + t.Errorf("unexpected root length: %d", len(root)) + } + // Verify it's not the empty root + emptyRoot := "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + if root == emptyRoot { + t.Error("single receipt should not produce empty root") + } +} + +func TestEncodeReceipt(t *testing.T) { + receipt := map[string]any{ + "status": "0x1", + "cumulativeGasUsed": "0x5208", + "logsBloom": "0x" + 
"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": []any{}, + "type": "0x0", + } + encoded, err := encodeReceipt(receipt) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(encoded) == 0 { + t.Error("encoded receipt should not be empty") + } +} + +func TestEncodeLog(t *testing.T) { + logMap := map[string]any{ + "address": "0xdac17f958d2ee523a2206206994597c13d831ec7", + "topics": []any{"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"}, + "data": "0x0000000000000000000000000000000000000000000000000000000005f5e100", + } + encoded, err := encodeLog(logMap) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(encoded) == 0 { + t.Error("encoded log should not be empty") + } +} diff --git a/internal/rpc/wsconn.go b/internal/rpc/wsconn.go new file mode 100644 index 00000000..34c83517 --- /dev/null +++ b/internal/rpc/wsconn.go @@ -0,0 +1,66 @@ +package rpc + +import ( + "fmt" + "net/http" + "sync" + "time" + + "github.com/gorilla/websocket" +) + +// WSConn wraps a gorilla/websocket.Conn for persistent JSON-RPC communication. +type WSConn struct { + conn *websocket.Conn + mu sync.Mutex +} + +// Dial establishes a persistent WebSocket connection to the given URL. 
+func Dial(url string) (*WSConn, error) { + dialer := websocket.Dialer{ + HandshakeTimeout: 30 * time.Second, + } + conn, _, err := dialer.Dial(url, http.Header{}) + if err != nil { + return nil, fmt.Errorf("websocket dial %s: %w", url, err) + } + return &WSConn{conn: conn}, nil +} + +// SendJSON writes a JSON-RPC request to the WebSocket connection. +func (w *WSConn) SendJSON(request any) error { + w.mu.Lock() + defer w.mu.Unlock() + return w.conn.WriteJSON(request) +} + +// RecvJSON reads a JSON-RPC response from the WebSocket connection. +func (w *WSConn) RecvJSON(response any) error { + return w.conn.ReadJSON(response) +} + +// CallJSON sends a JSON-RPC request and reads the response. +func (w *WSConn) CallJSON(request any, response any) error { + if err := w.SendJSON(request); err != nil { + return fmt.Errorf("send: %w", err) + } + if err := w.RecvJSON(response); err != nil { + return fmt.Errorf("recv: %w", err) + } + return nil +} + +// Close gracefully closes the WebSocket connection. 
+func (w *WSConn) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + err := w.conn.WriteMessage( + websocket.CloseMessage, + websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), + ) + if err != nil { + _ = w.conn.Close() + return err + } + return w.conn.Close() +} diff --git a/internal/tools/block_by_number.go b/internal/tools/block_by_number.go new file mode 100644 index 00000000..99a000b1 --- /dev/null +++ b/internal/tools/block_by_number.go @@ -0,0 +1,110 @@ +package tools + +import ( + "fmt" + "log" + "os" + "os/signal" + "syscall" + "time" + + "github.com/erigontech/rpc-tests/internal/rpc" + "github.com/urfave/cli/v2" +) + +var blockByNumberCommand = &cli.Command{ + Name: "block-by-number", + Usage: "Query latest/safe/finalized block numbers via WebSocket every 2s", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "url", + Value: "ws://127.0.0.1:8545", + Usage: "WebSocket URL of the Ethereum node", + }, + }, + Action: runBlockByNumber, +} + +type jsonRPCRequest struct { + Jsonrpc string `json:"jsonrpc"` + Method string `json:"method"` + Params []any `json:"params"` + ID int `json:"id"` +} + +type jsonRPCResponse struct { + Jsonrpc string `json:"jsonrpc"` + ID int `json:"id"` + Result any `json:"result"` + Error any `json:"error"` +} + +func runBlockByNumber(c *cli.Context) error { + url := c.String("url") + + conn, err := rpc.Dial(url) + if err != nil { + return fmt.Errorf("connect to %s: %w", url, err) + } + defer conn.Close() + log.Printf("Successfully connected to Ethereum node at %s", url) + + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + delay := 2 * time.Second + log.Printf("Query blocks started delay: %v", delay) + + ticker := time.NewTicker(delay) + defer ticker.Stop() + + // Query immediately, then on ticker + for { + latest, err := getBlockNumber(conn, "latest", 1) + if err != nil { + return fmt.Errorf("get latest block: %w", err) + } + safe, err := getBlockNumber(conn, "safe", 2) + if err != 
nil { + return fmt.Errorf("get safe block: %w", err) + } + finalized, err := getBlockNumber(conn, "finalized", 3) + if err != nil { + return fmt.Errorf("get finalized block: %w", err) + } + log.Printf("Block latest: %s safe: %s finalized: %s", latest, safe, finalized) + + select { + case <-sigs: + log.Printf("Received interrupt signal") + log.Printf("Query blocks terminated") + return nil + case <-ticker.C: + } + } +} + +func getBlockNumber(conn *rpc.WSConn, tag string, id int) (string, error) { + req := jsonRPCRequest{ + Jsonrpc: "2.0", + Method: "eth_getBlockByNumber", + Params: []any{tag, false}, + ID: id, + } + var resp jsonRPCResponse + if err := conn.CallJSON(req, &resp); err != nil { + return "", err + } + if resp.Error != nil { + return "", fmt.Errorf("RPC error: %v", resp.Error) + } + result, ok := resp.Result.(map[string]any) + if !ok { + return "", fmt.Errorf("unexpected result type: %T", resp.Result) + } + number, ok := result["number"].(string) + if !ok { + return "", fmt.Errorf("missing number field in block result") + } + return number, nil +} diff --git a/internal/tools/empty_blocks.go b/internal/tools/empty_blocks.go new file mode 100644 index 00000000..1b41b686 --- /dev/null +++ b/internal/tools/empty_blocks.go @@ -0,0 +1,201 @@ +package tools + +import ( + "context" + "fmt" + "log" + "os" + "os/signal" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/erigontech/rpc-tests/internal/rpc" + "github.com/urfave/cli/v2" +) + +var emptyBlocksCommand = &cli.Command{ + Name: "empty-blocks", + Usage: "Search backward for N empty blocks from latest", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "url", + Value: "http://localhost:8545", + Usage: "HTTP URL of the Ethereum node", + }, + &cli.IntFlag{ + Name: "count", + Value: 10, + Usage: "Number of empty blocks to search for", + }, + &cli.BoolFlag{ + Name: "ignore-withdrawals", + Usage: "Ignore withdrawals when determining if a block is empty", + }, + &cli.BoolFlag{ + Name: "compare-state-root", 
+ Usage: "Compare state root with parent block", + }, + }, + Action: runEmptyBlocks, +} + +type blockInfo struct { + Number uint64 + Transactions []any + Withdrawals []any + HasWithdrawals bool + StateRoot string + ParentHash string +} + +func runEmptyBlocks(c *cli.Context) error { + url := c.String("url") + count := c.Int("count") + ignoreWithdrawals := c.Bool("ignore-withdrawals") + compareStateRoot := c.Bool("compare-state-root") + + client := rpc.NewClient("http", "", 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigs + log.Printf("Received interrupt signal") + cancel() + }() + + // Strip protocol prefix to get target for rpc.Client + target := strings.TrimPrefix(strings.TrimPrefix(url, "http://"), "https://") + + latestBlock, _, err := rpc.GetLatestBlockNumber(ctx, client, target) + if err != nil { + return fmt.Errorf("get latest block: %w", err) + } + log.Printf("Latest block number: %d", latestBlock) + log.Printf("Searching for the last %d empty blocks...", count) + + var emptyBlocks []uint64 + batchSize := 100 + + currentBlock := int64(latestBlock) + for currentBlock >= 0 && len(emptyBlocks) < count { + if ctx.Err() != nil { + break + } + + startBlock := max(0, currentBlock-int64(batchSize)+1) + + // Fetch blocks in parallel + blocks := make([]blockInfo, currentBlock-startBlock+1) + var wg sync.WaitGroup + var mu sync.Mutex + var fetchErr error + + for i := startBlock; i <= currentBlock; i++ { + wg.Add(1) + go func(blockNum int64, idx int) { + defer wg.Done() + bi, err := fetchBlockInfo(ctx, client, target, blockNum) + if err != nil { + mu.Lock() + if fetchErr == nil { + fetchErr = err + } + mu.Unlock() + return + } + blocks[idx] = bi + }(i, int(i-startBlock)) + } + wg.Wait() + + if fetchErr != nil { + log.Printf("Warning: failed to fetch some blocks: %v", fetchErr) + } + + // Process results backward + for i := 
len(blocks) - 1; i >= 0 && len(emptyBlocks) < count; i-- { + b := blocks[i] + if b.Number == 0 && i > 0 { + continue // skip unfetched blocks + } + + noTxns := len(b.Transactions) == 0 + if !noTxns { + continue + } + if !ignoreWithdrawals && b.HasWithdrawals && len(b.Withdrawals) > 0 { + continue + } + + emptyBlocks = append(emptyBlocks, b.Number) + log.Printf("Block %d is empty. Total found: %d/%d", b.Number, len(emptyBlocks), count) + + if compareStateRoot && b.Number > 0 { + parent, err := fetchBlockInfo(ctx, client, target, int64(b.Number-1)) + if err == nil { + if b.StateRoot == parent.StateRoot { + log.Printf(" stateRoot: %s MATCHES", b.StateRoot) + } else { + log.Printf(" stateRoot: %s DOES NOT MATCH [parent stateRoot: %s]", b.StateRoot, parent.StateRoot) + } + } + } + } + + currentBlock = startBlock - 1 + if currentBlock >= 0 && currentBlock%100000 == 0 { + log.Printf("Reached block %d...", currentBlock) + } + } + + if len(emptyBlocks) == count { + log.Printf("Found last %d empty blocks!", count) + } else if len(emptyBlocks) == 0 { + log.Printf("Warning: could not find %d empty blocks within the chain history.", count) + } + + return nil +} + +func fetchBlockInfo(ctx context.Context, client *rpc.Client, target string, blockNum int64) (blockInfo, error) { + hexNum := "0x" + strconv.FormatInt(blockNum, 16) + req := fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["%s",false],"id":1}`, hexNum) + + var resp map[string]any + _, err := client.Call(ctx, target, []byte(req), &resp) + if err != nil { + return blockInfo{}, err + } + if errVal, ok := resp["error"]; ok { + return blockInfo{}, fmt.Errorf("RPC error: %v", errVal) + } + result, ok := resp["result"].(map[string]any) + if !ok { + return blockInfo{}, fmt.Errorf("unexpected result type") + } + + bi := blockInfo{ + Number: uint64(blockNum), + } + + if txns, ok := result["transactions"].([]any); ok { + bi.Transactions = txns + } + if withdrawals, ok := result["withdrawals"].([]any); ok { 
+ bi.HasWithdrawals = true + bi.Withdrawals = withdrawals + } + if sr, ok := result["stateRoot"].(string); ok { + bi.StateRoot = sr + } + if ph, ok := result["parentHash"].(string); ok { + bi.ParentHash = ph + } + + return bi, nil +} diff --git a/internal/tools/filter_changes.go b/internal/tools/filter_changes.go new file mode 100644 index 00000000..6631ae28 --- /dev/null +++ b/internal/tools/filter_changes.go @@ -0,0 +1,117 @@ +package tools + +import ( + "fmt" + "log" + "os" + "os/signal" + "syscall" + "time" + + "github.com/erigontech/rpc-tests/internal/rpc" + "github.com/urfave/cli/v2" +) + +// ERC20 Transfer event topic +const transferTopic = "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3e0" + +var filterChangesCommand = &cli.Command{ + Name: "filter-changes", + Usage: "Create ERC20 Transfer filter and poll changes/logs via WebSocket", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "url", + Value: "ws://127.0.0.1:8545", + Usage: "WebSocket URL of the Ethereum node", + }, + }, + Action: runFilterChanges, +} + +func runFilterChanges(c *cli.Context) error { + url := c.String("url") + + conn, err := rpc.Dial(url) + if err != nil { + return fmt.Errorf("connect to %s: %w", url, err) + } + defer conn.Close() + log.Printf("Successfully connected to Ethereum node at %s", url) + + // Create filter with Transfer topic + var filterResp jsonRPCResponse + err = conn.CallJSON(jsonRPCRequest{ + Jsonrpc: "2.0", + Method: "eth_newFilter", + Params: []any{map[string]any{"topics": []string{transferTopic}}}, + ID: 1, + }, &filterResp) + if err != nil { + return fmt.Errorf("create filter: %w", err) + } + if filterResp.Error != nil { + return fmt.Errorf("create filter RPC error: %v", filterResp.Error) + } + filterID, ok := filterResp.Result.(string) + if !ok { + return fmt.Errorf("unexpected filter ID type: %T", filterResp.Result) + } + log.Printf("State change filter registered: %s", filterID) + + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, 
syscall.SIGINT, syscall.SIGTERM) + + delay := 2 * time.Second + ticker := time.NewTicker(delay) + defer ticker.Stop() + + for { + // Get filter changes + var changesResp jsonRPCResponse + err = conn.CallJSON(jsonRPCRequest{ + Jsonrpc: "2.0", + Method: "eth_getFilterChanges", + Params: []any{filterID}, + ID: 2, + }, &changesResp) + if err != nil { + log.Printf("Error getting filter changes: %v", err) + } else if changes, ok := changesResp.Result.([]any); ok && len(changes) > 0 { + log.Printf("Changes: %v", changes) + } else { + log.Printf("No change received") + } + + // Get filter logs + var logsResp jsonRPCResponse + err = conn.CallJSON(jsonRPCRequest{ + Jsonrpc: "2.0", + Method: "eth_getFilterLogs", + Params: []any{filterID}, + ID: 3, + }, &logsResp) + if err != nil { + log.Printf("Error getting filter logs: %v", err) + } else if logs, ok := logsResp.Result.([]any); ok && len(logs) > 0 { + log.Printf("Logs: %v", logs) + } else { + log.Printf("No log received") + } + + select { + case <-sigs: + log.Printf("Received interrupt signal") + // Uninstall filter + var uninstallResp jsonRPCResponse + _ = conn.CallJSON(jsonRPCRequest{ + Jsonrpc: "2.0", + Method: "eth_uninstallFilter", + Params: []any{filterID}, + ID: 4, + }, &uninstallResp) + log.Printf("State change filter unregistered") + return nil + case <-ticker.C: + } + } +} diff --git a/internal/tools/graphql.go b/internal/tools/graphql.go new file mode 100644 index 00000000..b46de635 --- /dev/null +++ b/internal/tools/graphql.go @@ -0,0 +1,320 @@ +package tools + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/urfave/cli/v2" +) + +var graphqlCommand = &cli.Command{ + Name: "graphql", + Usage: "Execute GraphQL queries against an Ethereum node", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "http-url", + Value: "http://127.0.0.1:8545/graphql", + Usage: "GraphQL URL of the Ethereum node", + }, + &cli.StringFlag{ + 
Name: "query", + Usage: "GraphQL query string (mutually exclusive with --tests-url)", + }, + &cli.StringFlag{ + Name: "tests-url", + Usage: "GitHub tree URL with test files (mutually exclusive with --query)", + }, + &cli.BoolFlag{ + Name: "stop-at-first-error", + Usage: "Stop execution at first test error", + }, + &cli.IntFlag{ + Name: "test-number", + Value: -1, + Usage: "Run only the test at this index (0-based)", + }, + }, + Action: runGraphQL, +} + +type graphqlTestCase struct { + Request string `json:"request"` + Responses []json.RawMessage `json:"responses"` +} + +func runGraphQL(c *cli.Context) error { + httpURL := c.String("http-url") + query := c.String("query") + testsURL := c.String("tests-url") + stopAtError := c.Bool("stop-at-first-error") + testNumber := c.Int("test-number") + + if query == "" && testsURL == "" { + return fmt.Errorf("must specify either --query or --tests-url") + } + if query != "" && testsURL != "" { + return fmt.Errorf("--query and --tests-url are mutually exclusive") + } + + client := &http.Client{} + + if query != "" { + result, err := executeGraphQLQuery(client, httpURL, query) + if err != nil { + return err + } + log.Printf("Result: %s", result) + return nil + } + + return executeGraphQLTests(client, httpURL, testsURL, stopAtError, testNumber) +} + +func executeGraphQLQuery(client *http.Client, url, query string) ([]byte, error) { + body := []byte(query) + req, err := http.NewRequest("POST", url, bytes.NewReader(body)) + if err != nil { + return nil, fmt.Errorf("create request: %w", err) + } + req.Header.Set("Content-Type", "application/graphql") + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("execute query: %w", err) + } + defer resp.Body.Close() + + return io.ReadAll(resp.Body) +} + +func executeGraphQLTests(client *http.Client, httpURL, testsURL string, stopAtError bool, testNumber int) error { + // Download test files from GitHub + tempDir, err := downloadGitHubDirectory(client, testsURL) + if err 
!= nil { + return fmt.Errorf("download tests: %w", err) + } + defer func() { + log.Printf("Cleaning up temporary directory: %s", tempDir) + os.RemoveAll(tempDir) + }() + + log.Printf("Starting test execution using files from %s", tempDir) + + // Discover and sort test files + entries, err := os.ReadDir(tempDir) + if err != nil { + return fmt.Errorf("read test dir: %w", err) + } + + var testFiles []string + for _, e := range entries { + if !e.IsDir() && strings.HasSuffix(e.Name(), ".json") { + testFiles = append(testFiles, e.Name()) + } + } + sort.Strings(testFiles) + + if len(testFiles) == 0 { + log.Printf("Warning: no *.json files found in %s. Aborting tests.", tempDir) + return fmt.Errorf("no test files found") + } + + totalTests := len(testFiles) + if testNumber >= 0 { + totalTests = 1 + } + passedTests := 0 + + graphqlClient := &http.Client{} + + for i, testFile := range testFiles { + if testNumber >= 0 && testNumber != i { + continue + } + + testPath := filepath.Join(tempDir, testFile) + data, err := os.ReadFile(testPath) + if err != nil { + log.Printf("Test %d FAILED: cannot read %s: %v", i+1, testFile, err) + continue + } + + var tc graphqlTestCase + if err := json.Unmarshal(data, &tc); err != nil { + log.Printf("Test %d FAILED: invalid JSON in %s: %v", i+1, testFile, err) + continue + } + + if tc.Request == "" { + log.Printf("Test %d FAILED: 'request' field is missing in %s", i+1, testFile) + continue + } + if len(tc.Responses) == 0 { + log.Printf("Test %d FAILED: 'responses' field is missing in %s", i+1, testFile) + continue + } + + // Execute query + actualResult, err := executeGraphQLQuery(graphqlClient, httpURL, strings.TrimSpace(tc.Request)) + if err != nil { + log.Printf("Test %d FAILED: query execution error: %v", i+1, err) + if stopAtError { + log.Printf("Testing finished after first error. 
Passed: %d/%d", passedTests, totalTests) + return fmt.Errorf("stopped at first error") + } + continue + } + + // Parse actual result + var actualData map[string]any + if err := json.Unmarshal(actualResult, &actualData); err != nil { + log.Printf("Test %d FAILED: cannot parse response: %v", i+1, err) + continue + } + + // Compare actual vs expected: test passes if actual matches ANY expected response + passing := false + for _, expectedRaw := range tc.Responses { + var expected map[string]any + if err := json.Unmarshal(expectedRaw, &expected); err != nil { + continue + } + + // Check if actual data matches expected data + actualDataField := actualData["data"] + expectedDataField := expected["data"] + if jsonEqual(actualDataField, expectedDataField) { + passing = true + break + } + + // Check if both have errors + if expected["errors"] != nil && actualData["errors"] != nil { + passing = true + break + } + } + + if passing { + passedTests++ + log.Printf("Test %d %s PASSED.", i+1, testFile) + } else { + log.Printf("Test %d %s FAILED: actual result didn't match any expected response.", i+1, testFile) + log.Printf("Request: %s", strings.TrimSpace(tc.Request)) + log.Printf("Actual: %s", string(actualResult)) + if stopAtError { + log.Printf("Testing finished after first error. Passed: %d/%d", passedTests, totalTests) + return fmt.Errorf("stopped at first error") + } + } + } + + log.Printf("Testing finished. 
Passed: %d/%d", passedTests, totalTests) + if passedTests != totalTests { + return fmt.Errorf("some tests failed: %d/%d passed", passedTests, totalTests) + } + return nil +} + +func jsonEqual(a, b any) bool { + aj, err1 := json.Marshal(a) + bj, err2 := json.Marshal(b) + if err1 != nil || err2 != nil { + return false + } + return bytes.Equal(aj, bj) +} + +type githubContent struct { + Name string `json:"name"` + Type string `json:"type"` + DownloadURL string `json:"download_url"` +} + +func parseGitHubTreeURL(rawURL string) (owner, repo, branch, folderPath string, err error) { + u, err := url.Parse(rawURL) + if err != nil { + return "", "", "", "", fmt.Errorf("parse URL: %w", err) + } + parts := strings.Split(strings.Trim(u.Path, "/"), "/") + if len(parts) < 5 || parts[2] != "tree" { + return "", "", "", "", fmt.Errorf("invalid GitHub tree URL format: %s", rawURL) + } + owner = parts[0] + repo = parts[1] + branch = parts[3] + folderPath = strings.Join(parts[4:], "/") + return +} + +func downloadGitHubDirectory(client *http.Client, treeURL string) (string, error) { + owner, repo, branch, folderPath, err := parseGitHubTreeURL(treeURL) + if err != nil { + return "", err + } + + apiURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/contents/%s?ref=%s", owner, repo, folderPath, branch) + + tempDir, err := os.MkdirTemp("", "graphql-tests-*") + if err != nil { + return "", fmt.Errorf("create temp dir: %w", err) + } + log.Printf("Downloading test files to temporary directory: %s", tempDir) + + resp, err := client.Get(apiURL) + if err != nil { + os.RemoveAll(tempDir) + return "", fmt.Errorf("fetch GitHub API: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + os.RemoveAll(tempDir) + body, _ := io.ReadAll(resp.Body) + return "", fmt.Errorf("GitHub API error %d: %s", resp.StatusCode, string(body[:min(len(body), 100)])) + } + + var contents []githubContent + if err := json.NewDecoder(resp.Body).Decode(&contents); err != nil { + 
os.RemoveAll(tempDir) + return "", fmt.Errorf("decode GitHub API response: %w", err) + } + + downloaded := 0 + for _, item := range contents { + if item.Type != "file" || !strings.HasSuffix(item.Name, ".json") { + continue + } + + fileResp, err := client.Get(item.DownloadURL) + if err != nil { + log.Printf("Warning: failed to download %s: %v", item.Name, err) + continue + } + + data, err := io.ReadAll(fileResp.Body) + fileResp.Body.Close() + if err != nil { + log.Printf("Warning: failed to read %s: %v", item.Name, err) + continue + } + + if err := os.WriteFile(filepath.Join(tempDir, item.Name), data, 0644); err != nil { + log.Printf("Warning: failed to write %s: %v", item.Name, err) + continue + } + downloaded++ + } + + log.Printf("Downloaded %d test files.", downloaded) + return tempDir, nil +} diff --git a/internal/tools/latest_block_logs.go b/internal/tools/latest_block_logs.go new file mode 100644 index 00000000..387e9057 --- /dev/null +++ b/internal/tools/latest_block_logs.go @@ -0,0 +1,191 @@ +package tools + +import ( + "context" + "fmt" + "log" + "os" + "os/signal" + "strings" + "syscall" + "time" + + "github.com/erigontech/rpc-tests/internal/rpc" + "github.com/urfave/cli/v2" +) + +const emptyTrieRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + +var latestBlockLogsCommand = &cli.Command{ + Name: "latest-block-logs", + Usage: "Monitor latest block and validate getLogs vs receiptsRoot", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "url", + Value: "http://127.0.0.1:8545", + Usage: "HTTP URL of the Ethereum node", + }, + &cli.Float64Flag{ + Name: "interval", + Value: 0.1, + Usage: "Sleep interval between queries in seconds", + }, + }, + Action: runLatestBlockLogs, +} + +func runLatestBlockLogs(c *cli.Context) error { + url := c.String("url") + interval := time.Duration(c.Float64("interval") * float64(time.Second)) + + client := rpc.NewClient("http", "", 0) + target := strings.TrimPrefix(strings.TrimPrefix(url, "http://"), 
"https://") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigs + log.Printf("Received interrupt signal. Shutting down...") + cancel() + }() + + log.Printf("Query latest block logs started... Press Ctrl+C to stop.") + + var currentBlockNumber string + for { + if ctx.Err() != nil { + break + } + + // Get latest block + block, err := getBlock(ctx, client, target, "latest") + if err != nil { + log.Printf("Error: get_block failed: %v", err) + select { + case <-ctx.Done(): + case <-time.After(interval): + } + continue + } + + blockNumber, _ := block["number"].(string) + if blockNumber == currentBlockNumber { + select { + case <-ctx.Done(): + case <-time.After(interval): + } + continue + } + + log.Printf("Latest block is %s", blockNumber) + currentBlockNumber = blockNumber + blockHash, _ := block["hash"].(string) + receiptsRoot, _ := block["receiptsRoot"].(string) + + // Call eth_getLogs with block hash + logs, err := getLogs(ctx, client, target, blockHash) + if err != nil { + log.Printf("Error: get_logs for block %s failed: %v", blockNumber, err) + select { + case <-ctx.Done(): + case <-time.After(interval): + } + continue + } + + if len(logs) > 0 { + log.Printf("Block %s: eth_getLogs returned %d log(s).", blockNumber, len(logs)) + } else if receiptsRoot != emptyTrieRoot { + log.Printf("Block %s: eth_getLogs returned 0 logs and receiptsRoot is non-empty...", blockNumber) + + // Wait half block time to be sure latest block got executed + select { + case <-ctx.Done(): + break + case <-time.After(6 * time.Second): + } + + // Fetch receipts and count logs + receipts, err := getBlockReceipts(ctx, client, target, blockNumber) + if err != nil { + log.Printf("Error: get_block_receipts for block %s failed: %v", blockNumber, err) + continue + } + + numLogs := countReceiptLogs(receipts) + if numLogs > 0 { + log.Printf("Warning: Block %s: 
eth_getLogs returned 0 logs but there are %d", blockNumber, numLogs) + break + } + } + } + + log.Printf("Query latest block logs terminated.") + return nil +} + +func getBlock(ctx context.Context, client *rpc.Client, target, tag string) (map[string]any, error) { + req := fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["%s",false],"id":1}`, tag) + var resp map[string]any + _, err := client.Call(ctx, target, []byte(req), &resp) + if err != nil { + return nil, err + } + result, ok := resp["result"].(map[string]any) + if !ok { + return nil, fmt.Errorf("unexpected result type") + } + return result, nil +} + +func getLogs(ctx context.Context, client *rpc.Client, target, blockHash string) ([]any, error) { + req := fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getLogs","params":[{"blockHash":"%s"}],"id":1}`, blockHash) + var resp map[string]any + _, err := client.Call(ctx, target, []byte(req), &resp) + if err != nil { + return nil, err + } + if errVal, ok := resp["error"]; ok { + return nil, fmt.Errorf("RPC error: %v", errVal) + } + result, ok := resp["result"].([]any) + if !ok { + return nil, nil + } + return result, nil +} + +func getBlockReceipts(ctx context.Context, client *rpc.Client, target, blockNumber string) ([]any, error) { + req := fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getBlockReceipts","params":["%s"],"id":1}`, blockNumber) + var resp map[string]any + _, err := client.Call(ctx, target, []byte(req), &resp) + if err != nil { + return nil, err + } + if errVal, ok := resp["error"]; ok { + return nil, fmt.Errorf("RPC error: %v", errVal) + } + result, ok := resp["result"].([]any) + if !ok { + return nil, nil + } + return result, nil +} + +func countReceiptLogs(receipts []any) int { + count := 0 + for _, r := range receipts { + receipt, ok := r.(map[string]any) + if !ok { + continue + } + logs, ok := receipt["logs"].([]any) + if ok { + count += len(logs) + } + } + return count +} diff --git a/internal/tools/replay_request.go 
b/internal/tools/replay_request.go new file mode 100644 index 00000000..06d0fb17 --- /dev/null +++ b/internal/tools/replay_request.go @@ -0,0 +1,221 @@ +package tools + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "log" + "net/http" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/golang-jwt/jwt/v5" + "github.com/urfave/cli/v2" +) + +var replayRequestCommand = &cli.Command{ + Name: "replay-request", + Usage: "Replay JSON-RPC requests from Engine API log files", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "method", + Value: "engine_newPayloadV3", + Usage: "JSON-RPC method to replay", + }, + &cli.IntFlag{ + Name: "index", + Value: 1, + Usage: "Ordinal index of method occurrence to replay (-1 for all)", + }, + &cli.StringFlag{ + Name: "jwt", + Usage: "Path to JWT secret file (default: $HOME/prysm/jwt.hex)", + }, + &cli.StringFlag{ + Name: "path", + Usage: "Path to Engine API log directory (default: platform-specific Silkworm/logs)", + }, + &cli.StringFlag{ + Name: "url", + Value: "http://localhost:8551", + Usage: "HTTP URL of Engine API endpoint", + }, + &cli.BoolFlag{ + Name: "pretend", + Usage: "Do not send any HTTP request, just pretend", + }, + &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + Usage: "Print verbose output", + }, + }, + Action: runReplayRequest, +} + +func runReplayRequest(c *cli.Context) error { + method := c.String("method") + methodIndex := c.Int("index") + jwtFile := c.String("jwt") + logPath := c.String("path") + targetURL := c.String("url") + pretend := c.Bool("pretend") + verbose := c.Bool("verbose") + + // Default JWT file + if jwtFile == "" { + home, _ := os.UserHomeDir() + jwtFile = filepath.Join(home, "prysm", "jwt.hex") + } + + // Default log path + if logPath == "" { + logPath = getDefaultLogPath() + } + + // Build headers + headers := map[string]string{ + "Content-Type": "application/json", + } + + // Read JWT and create auth token + jwtAuth, err := encodeJWTToken(jwtFile) + if err != nil 
{ + log.Printf("Warning: JWT auth not available: %v", err) + } else { + headers["Authorization"] = "Bearer " + jwtAuth + } + + // Find the request + request, err := findJSONRPCRequest(logPath, method, methodIndex, verbose) + if err != nil { + return err + } + if request == "" { + log.Printf("Request %s not found [%d]", method, methodIndex) + return nil + } + + log.Printf("Request %s found [%d]", method, methodIndex) + if verbose { + log.Printf("%s", request) + } + + if pretend { + return nil + } + + // Send HTTP request + req, err := http.NewRequest("POST", targetURL, bytes.NewBufferString(request)) + if err != nil { + return fmt.Errorf("create request: %w", err) + } + for k, v := range headers { + req.Header.Set(k, v) + } + + client := &http.Client{Timeout: 300 * time.Second} + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("post failed: %w", err) + } + defer resp.Body.Close() + + body, _ := io.ReadAll(resp.Body) + log.Printf("Response got: %s", string(body)) + + return nil +} + +func getDefaultLogPath() string { + home, _ := os.UserHomeDir() + // Darwin: ~/Library/Silkworm/logs + // Linux: ~/Silkworm/logs + if _, err := os.Stat(filepath.Join(home, "Library")); err == nil { + return filepath.Join(home, "Library", "Silkworm", "logs") + } + return filepath.Join(home, "Silkworm", "logs") +} + +func encodeJWTToken(jwtFile string) (string, error) { + data, err := os.ReadFile(jwtFile) + if err != nil { + return "", err + } + contents := strings.TrimPrefix(strings.TrimSpace(string(data)), "0x") + + secretBytes, err := hex.DecodeString(contents) + if err != nil { + return "", fmt.Errorf("decode JWT secret: %w", err) + } + + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "iat": time.Now().Unix(), + }) + return token.SignedString(secretBytes) +} + +func findJSONRPCRequest(logDir, method string, methodIndex int, verbose bool) (string, error) { + // Find all engine_rpc_api log files + pattern := filepath.Join(logDir, 
"*engine_rpc_api*log") + matches, err := filepath.Glob(pattern) + if err != nil { + return "", fmt.Errorf("glob log files: %w", err) + } + if len(matches) == 0 { + // Try alternative: the path itself might be a file + if _, err := os.Stat(logDir); err == nil { + matches = []string{logDir} + } else { + return "", fmt.Errorf("no engine_rpc_api log files found in %s", logDir) + } + } + sort.Strings(matches) + + if verbose { + log.Printf("interface_log_dir_path: %s", logDir) + } + + methodCount := 0 + for _, logFile := range matches { + if verbose { + log.Printf("log_file_path: %s", logFile) + } + + data, err := os.ReadFile(logFile) + if err != nil { + log.Printf("Warning: cannot read %s: %v", logFile, err) + continue + } + + for _, line := range strings.Split(string(data), "\n") { + reqIdx := strings.Index(line, "REQ -> ") + if reqIdx == -1 { + continue + } + + if verbose { + methodPos := strings.Index(line, "method") + if methodPos != -1 { + end := min(methodPos+40, len(line)) + log.Printf("Method %s found %s", line[methodPos:end], logFile) + } + } + + if !strings.Contains(line, method) { + continue + } + + methodCount++ + if methodCount == methodIndex { + return line[reqIdx+len("REQ -> "):], nil + } + } + } + + return "", nil +} diff --git a/internal/tools/replay_tx.go b/internal/tools/replay_tx.go new file mode 100644 index 00000000..ddc7894a --- /dev/null +++ b/internal/tools/replay_tx.go @@ -0,0 +1,192 @@ +package tools + +import ( + "context" + "encoding/json" + "fmt" + "log" + "os" + "strconv" + "strings" + + "github.com/erigontech/rpc-tests/internal/rpc" + "github.com/urfave/cli/v2" +) + +const ( + silkTarget = "127.0.0.1:51515" + rpcdaemonTarget = "localhost:8545" + outputDir = "./output/" +) + +var replayTxCommand = &cli.Command{ + Name: "replay-tx", + Usage: "Scan blocks for transactions and compare trace responses between two servers", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "start", + Usage: "Starting point as block:tx (e.g., 1000:0)", + Value: 
"0:0", + }, + &cli.BoolFlag{ + Name: "continue", + Aliases: []string{"c"}, + Usage: "Continue scanning, don't stop at first diff", + }, + &cli.IntFlag{ + Name: "number", + Aliases: []string{"n"}, + Value: 0, + Usage: "Maximum number of failed txs before stopping", + }, + &cli.IntFlag{ + Name: "method", + Aliases: []string{"m"}, + Value: 0, + Usage: "0: trace_replayTransaction, 1: debug_traceTransaction", + }, + }, + Action: runReplayTx, +} + +func runReplayTx(c *cli.Context) error { + startStr := c.String("start") + continueOnDiff := c.Bool("continue") + maxFailed := c.Int("number") + methodID := c.Int("method") + + if maxFailed > 0 { + continueOnDiff = true + } + + parts := strings.SplitN(startStr, ":", 2) + if len(parts) != 2 { + return fmt.Errorf("bad start field definition: block:tx") + } + startBlock, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + return fmt.Errorf("invalid start block: %w", err) + } + startTx, err := strconv.ParseInt(parts[1], 10, 64) + if err != nil { + return fmt.Errorf("invalid start tx: %w", err) + } + + log.Printf("Starting scans from: %d tx-index: %d", startBlock, startTx) + + // Clean and recreate output directory + os.RemoveAll(outputDir) + if err := os.MkdirAll(outputDir, 0755); err != nil { + return fmt.Errorf("create output dir: %w", err) + } + + makeRequest := makeTraceTransaction + if methodID == 1 { + makeRequest = makeDebugTraceTransaction + } + + client := rpc.NewClient("http", "", 0) + ctx := context.Background() + + failedRequest := 0 + for block := startBlock; block < 18000000; block++ { + fmt.Printf("%09d\r", block) + + // Get block with full transactions + hexBlock := "0x" + strconv.FormatInt(block, 16) + blockReq := fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["%s",true],"id":1}`, hexBlock) + + var blockResp map[string]any + _, err := client.Call(ctx, silkTarget, []byte(blockReq), &blockResp) + if err != nil { + continue + } + if blockResp["error"] != nil { + continue + } + 
result, ok := blockResp["result"].(map[string]any) + if !ok || result == nil { + continue + } + transactions, ok := result["transactions"].([]any) + if !ok || len(transactions) == 0 { + continue + } + + for txn := int(startTx); txn < len(transactions); txn++ { + tx, ok := transactions[txn].(map[string]any) + if !ok { + continue + } + input, _ := tx["input"].(string) + if len(input) < 2 { + continue + } + txHash, _ := tx["hash"].(string) + + res := compareTxResponses(ctx, client, makeRequest, block, txn, txHash) + if res == 1 { + log.Printf("Diff on block: %d tx-index: %d Hash: %s", block, txn, txHash) + if !continueOnDiff { + return fmt.Errorf("diff found") + } + if maxFailed > 0 { + failedRequest++ + if failedRequest >= maxFailed { + return fmt.Errorf("max failed requests reached: %d", maxFailed) + } + } + } + } + // Reset start tx after first block + startTx = 0 + } + + return nil +} + +type requestBuilder func(txHash string) string + +func makeTraceTransaction(txHash string) string { + return fmt.Sprintf(`{"jsonrpc":"2.0","method":"trace_replayTransaction","params":["%s",["vmTrace"]],"id":1}`, txHash) +} + +func makeDebugTraceTransaction(txHash string) string { + return fmt.Sprintf(`{"jsonrpc":"2.0","method":"debug_traceTransaction","params":["%s",{"disableMemory":false,"disableStack":false,"disableStorage":false}],"id":1}`, txHash) +} + +func compareTxResponses(ctx context.Context, client *rpc.Client, makeRequest requestBuilder, block int64, txIndex int, txHash string) int { + filename := fmt.Sprintf("bn_%d_txn_%d_hash_%s", block, txIndex, txHash) + silkFilename := outputDir + filename + ".silk" + rpcdaemonFilename := outputDir + filename + ".rpcdaemon" + diffFilename := outputDir + filename + ".diffs" + + request := makeRequest(txHash) + + var silkResp, rpcdaemonResp any + _, err1 := client.Call(ctx, silkTarget, []byte(request), &silkResp) + _, err2 := client.Call(ctx, rpcdaemonTarget, []byte(request), &rpcdaemonResp) + + if err1 != nil || err2 != nil { + 
log.Printf("Request error: silk=%v rpcdaemon=%v", err1, err2) + return 0 + } + + silkJSON, _ := json.MarshalIndent(silkResp, "", " ") + rpcdaemonJSON, _ := json.MarshalIndent(rpcdaemonResp, "", " ") + + _ = os.WriteFile(silkFilename, silkJSON, 0644) + _ = os.WriteFile(rpcdaemonFilename, rpcdaemonJSON, 0644) + + // Compare + if string(silkJSON) != string(rpcdaemonJSON) { + _ = os.WriteFile(diffFilename, []byte("DIFF"), 0644) + return 1 + } + + // Clean up if no diff + os.Remove(silkFilename) + os.Remove(rpcdaemonFilename) + os.Remove(diffFilename) + return 0 +} diff --git a/internal/tools/scan_block_receipts.go b/internal/tools/scan_block_receipts.go new file mode 100644 index 00000000..e8cdc79c --- /dev/null +++ b/internal/tools/scan_block_receipts.go @@ -0,0 +1,356 @@ +package tools + +import ( + "context" + "fmt" + "log" + "os" + "os/signal" + "strings" + "syscall" + "time" + + "github.com/erigontech/rpc-tests/internal/eth" + "github.com/erigontech/rpc-tests/internal/rpc" + "github.com/urfave/cli/v2" +) + +var scanBlockReceiptsCommand = &cli.Command{ + Name: "scan-block-receipts", + Usage: "Verify receipts root via MPT trie for block ranges or latest blocks", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "url", + Value: "http://127.0.0.1:8545", + Usage: "HTTP URL of the Ethereum node", + }, + &cli.Int64Flag{ + Name: "start-block", + Value: -1, + Usage: "Starting block number (inclusive)", + }, + &cli.Int64Flag{ + Name: "end-block", + Value: -1, + Usage: "Ending block number (inclusive)", + }, + &cli.BoolFlag{ + Name: "beyond-latest", + Usage: "Scan next-after-latest blocks", + }, + &cli.BoolFlag{ + Name: "stop-at-reorg", + Usage: "Stop at first chain reorg", + }, + &cli.Float64Flag{ + Name: "interval", + Value: 0.1, + Usage: "Sleep interval between queries in seconds", + }, + }, + Action: runScanBlockReceipts, +} + +func runScanBlockReceipts(c *cli.Context) error { + url := c.String("url") + startBlock := c.Int64("start-block") + endBlock := 
c.Int64("end-block") + beyondLatest := c.Bool("beyond-latest") + stopAtReorg := c.Bool("stop-at-reorg") + interval := time.Duration(c.Float64("interval") * float64(time.Second)) + + isRangeMode := startBlock >= 0 && endBlock >= 0 + isLatestMode := startBlock < 0 && endBlock < 0 + + if !isRangeMode && !isLatestMode { + return fmt.Errorf("you must specify --start-block AND --end-block, or neither") + } + if isRangeMode && endBlock < startBlock { + return fmt.Errorf("end block %d must be >= start block %d", endBlock, startBlock) + } + + client := rpc.NewClient("http", "", 0) + target := strings.TrimPrefix(strings.TrimPrefix(url, "http://"), "https://") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigs + log.Printf("Received interrupt signal. Shutting down...") + cancel() + }() + + if isRangeMode { + return scanReceiptsRange(ctx, client, target, startBlock, endBlock) + } + if beyondLatest { + return scanReceiptsBeyondLatest(ctx, client, target, interval, stopAtReorg) + } + return scanReceiptsLatest(ctx, client, target, interval, stopAtReorg) +} + +func scanReceiptsRange(ctx context.Context, client *rpc.Client, target string, start, end int64) error { + log.Printf("Scanning block receipts from %d to %d...", start, end) + + for blockNum := start; blockNum <= end; blockNum++ { + if ctx.Err() != nil { + log.Printf("Scan terminated by user.") + return nil + } + + if err := verifyReceiptsRoot(ctx, client, target, blockNum); err != nil { + return err + } + } + + log.Printf("Successfully scanned and verified all receipts from %d to %d.", start, end) + return nil +} + +func scanReceiptsLatest(ctx context.Context, client *rpc.Client, target string, interval time.Duration, stopAtReorg bool) error { + log.Printf("Scanning latest blocks... 
Press Ctrl+C to stop.") + + var currentBlockNumber int64 + var previousBlockHash string + + for ctx.Err() == nil { + block, err := getFullBlock(ctx, client, target, "latest") + if err != nil { + log.Printf("Error: %v", err) + sleepCtx(ctx, 1*time.Second) + continue + } + + blockNum := hexToInt64(block["number"]) + if blockNum == currentBlockNumber { + sleepCtx(ctx, interval) + continue + } + + if currentBlockNumber > 0 && blockNum != currentBlockNumber+1 { + log.Printf("Warning: gap detected at block %d, node still syncing...", blockNum) + } + + // Check for reorg + reorgDetected := false + if previousBlockHash != "" && blockNum == currentBlockNumber+1 { + parentHash, _ := block["parentHash"].(string) + if parentHash != previousBlockHash { + log.Printf("Warning: REORG DETECTED at block %d", currentBlockNumber) + log.Printf("Expected parentHash: %s", previousBlockHash) + log.Printf("Actual parentHash: %s", parentHash) + reorgDetected = true + } + } + + currentBlockNumber = blockNum + previousBlockHash, _ = block["hash"].(string) + + if err := verifyBlockReceipts(ctx, client, target, block, reorgDetected); err != nil { + return err + } + + if reorgDetected && stopAtReorg { + log.Printf("Stopping scan due to reorg detection (receipts were checked).") + return nil + } + } + + return nil +} + +func scanReceiptsBeyondLatest(ctx context.Context, client *rpc.Client, target string, interval time.Duration, stopAtReorg bool) error { + log.Printf("Scanning next-after-latest blocks... 
Press Ctrl+C to stop.") + + var currentBlockNumber int64 + var previousBlockHash string + + for ctx.Err() == nil { + block, err := getFullBlock(ctx, client, target, "latest") + if err != nil { + log.Printf("Error: %v", err) + sleepCtx(ctx, 1*time.Second) + continue + } + + blockNum := hexToInt64(block["number"]) + if blockNum == currentBlockNumber { + sleepCtx(ctx, interval) + continue + } + + // Check for gap and reorg + gapDetected := false + reorgDetected := false + if currentBlockNumber > 0 && blockNum != currentBlockNumber+1 { + log.Printf("Warning: gap detected at block %d, node still syncing...", blockNum) + gapDetected = true + } + if previousBlockHash != "" && blockNum == currentBlockNumber+1 { + parentHash, _ := block["parentHash"].(string) + if parentHash != previousBlockHash { + log.Printf("Warning: REORG DETECTED at block %d", currentBlockNumber) + reorgDetected = true + } + } + + currentBlockNumber = blockNum + previousBlockHash, _ = block["hash"].(string) + + // Verify current block receipts on gap or reorg + if gapDetected || reorgDetected { + if err := verifyBlockReceipts(ctx, client, target, block, reorgDetected); err != nil { + return err + } + } + + // Aggressively query the next block + var nextBlock map[string]any + for ctx.Err() == nil { + nextBlock, err = getFullBlockByNumber(ctx, client, target, currentBlockNumber+1) + if err == nil && nextBlock != nil { + break + } + sleepCtx(ctx, interval) + } + if ctx.Err() != nil { + break + } + + if err := verifyBlockReceipts(ctx, client, target, nextBlock, reorgDetected); err != nil { + return err + } + + if reorgDetected && stopAtReorg { + log.Printf("Stopping scan due to reorg detection (receipts were checked).") + return nil + } + } + + return nil +} + +func verifyReceiptsRoot(ctx context.Context, client *rpc.Client, target string, blockNum int64) error { + block, err := getFullBlockByNumber(ctx, client, target, blockNum) + if err != nil { + return fmt.Errorf("get block %d: %w", blockNum, err) + } 
+ if block == nil { + log.Printf("Block %d not found. Skipping.", blockNum) + return nil + } + + return verifyBlockReceipts(ctx, client, target, block, false) +} + +func verifyBlockReceipts(ctx context.Context, client *rpc.Client, target string, block map[string]any, reorgDetected bool) error { + blockNum := hexToInt64(block["number"]) + headerReceiptsRoot, _ := block["receiptsRoot"].(string) + blockHash, _ := block["hash"].(string) + + // Fetch receipts + receipts, err := fetchBlockReceiptsRaw(ctx, client, target, blockHash) + if err != nil { + log.Printf("Error fetching receipts for block %d: %v", blockNum, err) + return nil // Continue scanning + } + + computedRoot, err := eth.ComputeReceiptsRoot(receipts) + if err != nil { + log.Printf("Error computing receipts root for block %d: %v", blockNum, err) + return nil + } + + if computedRoot == headerReceiptsRoot { + if reorgDetected { + log.Printf("Block %d: Reorg detected, but receipts root IS valid.", blockNum) + } else { + log.Printf("Block %d: Receipts root verified (%d receipts).", blockNum, len(receipts)) + } + return nil + } + + log.Printf("CRITICAL: Receipt root mismatch detected at block %d", blockNum) + log.Printf("Expected header root: %s", headerReceiptsRoot) + log.Printf("Actual computed root: %s", computedRoot) + return fmt.Errorf("receipt root mismatch at block %d", blockNum) +} + +func getFullBlock(ctx context.Context, client *rpc.Client, target, tag string) (map[string]any, error) { + req := fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["%s",false],"id":1}`, tag) + var resp map[string]any + _, err := client.Call(ctx, target, []byte(req), &resp) + if err != nil { + return nil, err + } + if errVal, ok := resp["error"]; ok { + return nil, fmt.Errorf("RPC error: %v", errVal) + } + result, ok := resp["result"].(map[string]any) + if !ok { + return nil, fmt.Errorf("no result in response") + } + return result, nil +} + +func getFullBlockByNumber(ctx context.Context, client 
*rpc.Client, target string, blockNum int64) (map[string]any, error) { + hexNum := fmt.Sprintf("0x%x", blockNum) + return getFullBlock(ctx, client, target, hexNum) +} + +func fetchBlockReceiptsRaw(ctx context.Context, client *rpc.Client, target, blockHash string) ([]map[string]any, error) { + req := fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getBlockReceipts","params":["%s"],"id":1}`, blockHash) + var resp map[string]any + _, err := client.Call(ctx, target, []byte(req), &resp) + if err != nil { + return nil, err + } + if errVal, ok := resp["error"]; ok { + return nil, fmt.Errorf("RPC error: %v", errVal) + } + result, ok := resp["result"].([]any) + if !ok { + return nil, fmt.Errorf("unexpected result type") + } + + receipts := make([]map[string]any, 0, len(result)) + for _, r := range result { + receipt, ok := r.(map[string]any) + if !ok { + continue + } + receipts = append(receipts, receipt) + } + return receipts, nil +} + +func hexToInt64(v any) int64 { + s, ok := v.(string) + if !ok { + return 0 + } + s = strings.TrimPrefix(s, "0x") + var result int64 + for _, c := range s { + result <<= 4 + switch { + case c >= '0' && c <= '9': + result |= int64(c - '0') + case c >= 'a' && c <= 'f': + result |= int64(c - 'a' + 10) + case c >= 'A' && c <= 'F': + result |= int64(c - 'A' + 10) + } + } + return result +} + +func sleepCtx(ctx context.Context, d time.Duration) { + select { + case <-ctx.Done(): + case <-time.After(d): + } +} diff --git a/internal/tools/subscriptions.go b/internal/tools/subscriptions.go new file mode 100644 index 00000000..bec232a8 --- /dev/null +++ b/internal/tools/subscriptions.go @@ -0,0 +1,145 @@ +package tools + +import ( + "encoding/json" + "fmt" + "log" + "os" + "os/signal" + "syscall" + + "github.com/erigontech/rpc-tests/internal/rpc" + "github.com/urfave/cli/v2" +) + +// USDT contract address on mainnet +const usdtAddress = "0xdac17f958d2ee523a2206206994597c13d831ec7" + +// ERC20 Transfer event topic (with final 'f') +const transferTopicFull = 
"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef" + +var subscriptionsCommand = &cli.Command{ + Name: "subscriptions", + Usage: "Subscribe to newHeads and USDT Transfer logs via WebSocket", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "url", + Value: "ws://127.0.0.1:8545", + Usage: "WebSocket URL of the Ethereum node", + }, + }, + Action: runSubscriptions, +} + +type subscriptionNotification struct { + Jsonrpc string `json:"jsonrpc"` + Method string `json:"method"` + Params struct { + Subscription string `json:"subscription"` + Result json.RawMessage `json:"result"` + } `json:"params"` + // For subscribe responses + ID *int `json:"id,omitempty"` + Result string `json:"result,omitempty"` +} + +func runSubscriptions(c *cli.Context) error { + url := c.String("url") + + conn, err := rpc.Dial(url) + if err != nil { + return fmt.Errorf("connect to %s: %w", url, err) + } + defer conn.Close() + log.Printf("Successfully connected to Ethereum node at %s", url) + + // Subscribe to newHeads + var newHeadsResp jsonRPCResponse + err = conn.CallJSON(jsonRPCRequest{ + Jsonrpc: "2.0", + Method: "eth_subscribe", + Params: []any{"newHeads"}, + ID: 1, + }, &newHeadsResp) + if err != nil { + return fmt.Errorf("subscribe newHeads: %w", err) + } + if newHeadsResp.Error != nil { + return fmt.Errorf("subscribe newHeads RPC error: %v", newHeadsResp.Error) + } + newHeadsSubID, _ := newHeadsResp.Result.(string) + log.Printf("Subscribed to newHeads: %s", newHeadsSubID) + + // Subscribe to USDT Transfer logs + var logsResp jsonRPCResponse + err = conn.CallJSON(jsonRPCRequest{ + Jsonrpc: "2.0", + Method: "eth_subscribe", + Params: []any{"logs", map[string]any{ + "address": usdtAddress, + "topics": []string{transferTopicFull}, + }}, + ID: 2, + }, &logsResp) + if err != nil { + return fmt.Errorf("subscribe logs: %w", err) + } + if logsResp.Error != nil { + return fmt.Errorf("subscribe logs RPC error: %v", logsResp.Error) + } + logsSubID, _ := logsResp.Result.(string) + 
log.Printf("Subscribed to USDT logs: %s", logsSubID) + + log.Printf("Handle subscriptions started: 2") + + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + done := make(chan struct{}) + go func() { + <-sigs + log.Printf("Received interrupt signal") + // Unsubscribe + var resp jsonRPCResponse + _ = conn.CallJSON(jsonRPCRequest{ + Jsonrpc: "2.0", + Method: "eth_unsubscribe", + Params: []any{newHeadsSubID}, + ID: 3, + }, &resp) + _ = conn.CallJSON(jsonRPCRequest{ + Jsonrpc: "2.0", + Method: "eth_unsubscribe", + Params: []any{logsSubID}, + ID: 4, + }, &resp) + close(done) + }() + + // Listen for incoming subscription events + for { + select { + case <-done: + log.Printf("Handle subscriptions terminated") + return nil + default: + } + + var notification subscriptionNotification + if err := conn.RecvJSON(¬ification); err != nil { + select { + case <-done: + log.Printf("Handle subscriptions terminated") + return nil + default: + return fmt.Errorf("receive notification: %w", err) + } + } + + if notification.Params.Subscription == newHeadsSubID { + fmt.Printf("New block header: %s\n\n", notification.Params.Result) + } else if notification.Params.Subscription == logsSubID { + fmt.Printf("Log receipt: %s\n\n", notification.Params.Result) + } + } +} diff --git a/internal/tools/tools.go b/internal/tools/tools.go new file mode 100644 index 00000000..7e26119b --- /dev/null +++ b/internal/tools/tools.go @@ -0,0 +1,35 @@ +package tools + +import ( + "github.com/urfave/cli/v2" +) + +// subcommandNames is the set of known subcommand names for fast lookup. +var subcommandNames map[string]bool + +func init() { + subcommandNames = make(map[string]bool) + for _, cmd := range Commands() { + subcommandNames[cmd.Name] = true + } +} + +// IsSubcommand returns true if the given name matches a registered subcommand. +func IsSubcommand(name string) bool { + return subcommandNames[name] +} + +// Commands returns all tool subcommands. 
+func Commands() []*cli.Command { + return []*cli.Command{ + blockByNumberCommand, + emptyBlocksCommand, + filterChangesCommand, + latestBlockLogsCommand, + subscriptionsCommand, + graphqlCommand, + replayRequestCommand, + replayTxCommand, + scanBlockReceiptsCommand, + } +} diff --git a/internal/tools/tools_test.go b/internal/tools/tools_test.go new file mode 100644 index 00000000..f9278e0b --- /dev/null +++ b/internal/tools/tools_test.go @@ -0,0 +1,49 @@ +package tools + +import ( + "testing" +) + +func TestIsSubcommand(t *testing.T) { + known := []string{ + "block-by-number", + "empty-blocks", + "filter-changes", + "latest-block-logs", + "subscriptions", + "graphql", + "replay-request", + "replay-tx", + "scan-block-receipts", + } + for _, name := range known { + if !IsSubcommand(name) { + t.Errorf("IsSubcommand(%q) = false, want true", name) + } + } + + unknown := []string{"-c", "--help", "foo", "run", ""} + for _, name := range unknown { + if IsSubcommand(name) { + t.Errorf("IsSubcommand(%q) = true, want false", name) + } + } +} + +func TestCommandsCount(t *testing.T) { + cmds := Commands() + if len(cmds) != 9 { + t.Errorf("Commands() returned %d commands, want 9", len(cmds)) + } +} + +func TestCommandsHaveAction(t *testing.T) { + for _, cmd := range Commands() { + if cmd.Action == nil { + t.Errorf("command %q has nil Action", cmd.Name) + } + if cmd.Usage == "" { + t.Errorf("command %q has empty Usage", cmd.Name) + } + } +} From a396f9883507f49928fb4600441a6c2a9ccfa81c Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Sat, 14 Feb 2026 23:00:34 +0100 Subject: [PATCH 66/87] Fix receipt RLP double-encoding, GraphQL content type, and subscriptions shutdown - Fix receipt encoding bug: logs were double-encoded via rlpEncodeBytes on already-RLP-encoded log entries. Use rlpEncodeListFromRLP instead, which correctly treats each encoded log as an already-RLP item. Verified against 100 recent sepolia blocks (up to 406 receipts each). 
- Fix GraphQL content type: use application/json with {"query":"..."} body instead of application/graphql. - Fix subscriptions shutdown: close connection instead of sending eth_unsubscribe (which races with the notification read loop). Signal done channel first, then close connection to break RecvJSON cleanly. Co-Authored-By: Claude Opus 4.6 --- internal/eth/receipt.go | 16 ++-------------- internal/tools/graphql.go | 9 ++++++--- internal/tools/subscriptions.go | 28 +++++----------------------- 3 files changed, 13 insertions(+), 40 deletions(-) diff --git a/internal/eth/receipt.go b/internal/eth/receipt.go index ee73203e..e08d0677 100644 --- a/internal/eth/receipt.go +++ b/internal/eth/receipt.go @@ -81,8 +81,8 @@ func encodeReceipt(receipt map[string]any) ([]byte, error) { bloomBytes := hexToBytes(bloomHex) items = append(items, rlpEncodeBytes(bloomBytes)) - // logs - items = append(items, rlpEncodeList(encodedLogs)) + // logs (each encodedLog is already a full RLP-encoded list) + items = append(items, rlpEncodeListFromRLP(encodedLogs)) value := rlpEncodeListFromRLP(items) @@ -145,18 +145,6 @@ func rlpEncodeBytes(b []byte) []byte { return append(prefix, b...) 
} -func rlpEncodeList(items [][]byte) []byte { - return rlpEncodeListFromRLP(encodeItemsToRLP(items)) -} - -func encodeItemsToRLP(items [][]byte) [][]byte { - var rlpItems [][]byte - for _, item := range items { - rlpItems = append(rlpItems, rlpEncodeBytes(item)) - } - return rlpItems -} - func rlpEncodeListFromRLP(rlpItems [][]byte) []byte { var payload []byte for _, item := range rlpItems { diff --git a/internal/tools/graphql.go b/internal/tools/graphql.go index b46de635..78708062 100644 --- a/internal/tools/graphql.go +++ b/internal/tools/graphql.go @@ -80,12 +80,15 @@ func runGraphQL(c *cli.Context) error { } func executeGraphQLQuery(client *http.Client, url, query string) ([]byte, error) { - body := []byte(query) - req, err := http.NewRequest("POST", url, bytes.NewReader(body)) + payload, err := json.Marshal(map[string]string{"query": query}) + if err != nil { + return nil, fmt.Errorf("marshal query: %w", err) + } + req, err := http.NewRequest("POST", url, bytes.NewReader(payload)) if err != nil { return nil, fmt.Errorf("create request: %w", err) } - req.Header.Set("Content-Type", "application/graphql") + req.Header.Set("Content-Type", "application/json") resp, err := client.Do(req) if err != nil { diff --git a/internal/tools/subscriptions.go b/internal/tools/subscriptions.go index bec232a8..12eeba46 100644 --- a/internal/tools/subscriptions.go +++ b/internal/tools/subscriptions.go @@ -99,32 +99,13 @@ func runSubscriptions(c *cli.Context) error { go func() { <-sigs log.Printf("Received interrupt signal") - // Unsubscribe - var resp jsonRPCResponse - _ = conn.CallJSON(jsonRPCRequest{ - Jsonrpc: "2.0", - Method: "eth_unsubscribe", - Params: []any{newHeadsSubID}, - ID: 3, - }, &resp) - _ = conn.CallJSON(jsonRPCRequest{ - Jsonrpc: "2.0", - Method: "eth_unsubscribe", - Params: []any{logsSubID}, - ID: 4, - }, &resp) + // Signal done first, then close connection to break the read loop close(done) + conn.Close() }() // Listen for incoming subscription events for { - 
select { - case <-done: - log.Printf("Handle subscriptions terminated") - return nil - default: - } - var notification subscriptionNotification if err := conn.RecvJSON(¬ification); err != nil { select { @@ -136,9 +117,10 @@ func runSubscriptions(c *cli.Context) error { } } - if notification.Params.Subscription == newHeadsSubID { + switch notification.Params.Subscription { + case newHeadsSubID: fmt.Printf("New block header: %s\n\n", notification.Params.Result) - } else if notification.Params.Subscription == logsSubID { + case logsSubID: fmt.Printf("Log receipt: %s\n\n", notification.Params.Result) } } From f4732908adcc760d33cbca0c1d1bd7073d0030b4 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Sat, 14 Feb 2026 23:06:00 +0100 Subject: [PATCH 67/87] Update CLAUDE.md with subcommands, new packages, and gotchas Document the subcommand dispatch system, internal/tools/ and internal/eth/ packages, subcommand usage examples, the no-go-ethereum constraint, and key gotchas discovered during live verification (RLP double-encoding, Erigon old-block receipts, WebSocket shutdown, GraphQL content type). Co-Authored-By: Claude Opus 4.6 --- CLAUDE.md | 42 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index b2d4cb88..0eb56d7f 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -12,9 +12,15 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co # Build the integration test binary go build -o ./build/bin/rpc_int ./cmd/integration/main.go -# Run Go unit tests -go test ./internal/archive/ -go test ./internal/jsondiff/ +# Run all Go tests +go test ./... 
+ +# Run a single package's tests +go test ./internal/eth/ +go test ./internal/tools/ + +# Lint +golangci-lint run # Run Python unit tests pytest @@ -26,6 +32,17 @@ pytest ./build/bin/rpc_int -A eth_call # All tests for one API ./build/bin/rpc_int -a eth_ -c -f -S # APIs matching pattern, serial mode ./build/bin/rpc_int -b sepolia -c -f # Different network + +# Run subcommands (ported from Python scripts in src/rpctests/) +./build/bin/rpc_int block-by-number --url ws://127.0.0.1:8545 +./build/bin/rpc_int empty-blocks --url http://localhost:8545 --count 10 +./build/bin/rpc_int filter-changes --url ws://127.0.0.1:8545 +./build/bin/rpc_int latest-block-logs --url http://localhost:8545 +./build/bin/rpc_int subscriptions --url ws://127.0.0.1:8545 +./build/bin/rpc_int graphql --http-url http://127.0.0.1:8545/graphql --query '{block{number}}' +./build/bin/rpc_int replay-request --path /path/to/logs --url http://localhost:8551 --jwt /path/to/jwt +./build/bin/rpc_int replay-tx --start 1000000:0 --method 0 +./build/bin/rpc_int scan-block-receipts --url http://localhost:8545 --start-block 100 --end-block 200 ``` ## Architecture @@ -43,16 +60,20 @@ pytest 5. Compares actual response against expected response using JSON diff 6. Reports results with colored output, saves diffs to `{network}/results/` +**Subcommands** — `rpc_int` also serves as a host for standalone tool subcommands (ported from Python scripts in `src/rpctests/`). Dispatch is at the top of `cmd/integration/main.go`: if `os.Args[1]` matches a known subcommand, it delegates to a `urfave/cli/v2` app; otherwise falls through to the existing flag-based test runner. Subcommand implementations live in `internal/tools/`, one file per subcommand. 
+ **Internal packages** under `internal/`: - `internal/archive/` — Extract test fixtures from tar/gzip/bzip2 archives - `internal/jsondiff/` — Pure Go JSON diff with colored output -- `internal/rpc/` — HTTP/WebSocket JSON-RPC client with JWT auth and compression support +- `internal/rpc/` — HTTP/WebSocket JSON-RPC client with JWT auth and compression support. Includes `wsconn.go` for persistent WebSocket connections (send/receive/call JSON-RPC). - `internal/compare/` — Response comparison (exact match, JSON diff, external diff) - `internal/config/` — Configuration, CLI flag parsing, JWT secret management - `internal/filter/` — Test filtering (API name, pattern, exclusion, latest block) - `internal/runner/` — Parallel test orchestration (worker pool, scheduling, stats) - `internal/testdata/` — Test discovery, fixture loading, types - `internal/perf/` — Performance test support (Vegeta integration, reporting) +- `internal/tools/` — Subcommand implementations (block-by-number, empty-blocks, filter-changes, latest-block-logs, subscriptions, graphql, replay-request, replay-tx, scan-block-receipts) +- `internal/eth/` — Ethereum primitives: RLP encoding, Keccak256, MPT (Modified Merkle-Patricia Trie) for computing receipts root hashes **Test fixture format** — each test is a JSON file (or tarball containing JSON): ```json @@ -84,6 +105,15 @@ Test data lives in `integration/{network}/{api_name}/test_NN.json` across networ ## Dependencies -Go 1.24. Key libraries: `gorilla/websocket` (WebSocket transport), `josephburnett/jd/v2` (JSON diffing), `tsenart/vegeta/v12` (load testing), `urfave/cli/v2` (CLI framework for v2), `golang-jwt/jwt/v5` (JWT auth), `dsnet/compress` (bzip2). +Go 1.24. Key libraries: `gorilla/websocket` (WebSocket transport), `josephburnett/jd/v2` (JSON diffing), `tsenart/vegeta/v12` (load testing), `urfave/cli/v2` (CLI framework for subcommands), `golang-jwt/jwt/v5` (JWT auth), `dsnet/compress` (bzip2), `golang.org/x/crypto` (Keccak256 for MPT). 
+ +**Constraint: `github.com/ethereum/go-ethereum` must NOT be added as a dependency.** Ethereum primitives (RLP, Keccak256, MPT) are implemented from scratch in `internal/eth/`. + +Python 3.10+ with `requirements.txt` for legacy runner and standalone tools in `src/rpctests/`. + +## Known Issues & Gotchas -Python 3.10+ with `requirements.txt` for legacy runner and standalone tools in `src/rpctests/`. \ No newline at end of file +- **RLP encoding of already-encoded items**: When building RLP lists containing items that are already RLP-encoded (e.g., logs from `encodeLog()`), use `rlpEncodeListFromRLP()` which treats items as pre-encoded. Do NOT use `rlpEncodeBytes()` on them — that wraps the RLP list as a byte string, double-encoding it. +- **Erigon old-block receipts**: On some Erigon nodes, `eth_getBlockReceipts` for old blocks (e.g., sepolia block 999991) returns receipt data that doesn't match the block header's `receiptsRoot`. This is an Erigon issue (confirmed: go-ethereum's own `types.DeriveSha` also fails on the same data). Recent blocks work correctly. +- **WebSocket subscriptions shutdown**: When using `eth_subscribe`, don't send `eth_unsubscribe` during shutdown — it races with the notification read loop. Instead, signal a done channel then close the connection to break `RecvJSON`. +- **GraphQL content type**: Erigon's GraphQL endpoint requires `application/json` with `{"query":"..."}` body, not `application/graphql`. 
\ No newline at end of file From 01d92d978a816f6e88d1b41d1ec5453e4b89bd34 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Sat, 14 Feb 2026 23:50:25 +0100 Subject: [PATCH 68/87] Add --graphql flag to Erigon skill files Co-Authored-By: Claude Opus 4.6 --- .claude/skills/erigon-rpcdaemon-run/SKILL.md | 5 +++++ .claude/skills/erigon-run/SKILL.md | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/.claude/skills/erigon-rpcdaemon-run/SKILL.md b/.claude/skills/erigon-rpcdaemon-run/SKILL.md index 71d879b8..b55f9991 100644 --- a/.claude/skills/erigon-rpcdaemon-run/SKILL.md +++ b/.claude/skills/erigon-rpcdaemon-run/SKILL.md @@ -31,6 +31,11 @@ cd && ./build/bin/rpcdaemon --datadir= --http.p cd && ./build/bin/rpcdaemon --datadir= --ws ``` +### GraphQL support +```bash +cd && ./build/bin/rpcdaemon --datadir= --graphql +``` + ## Important Considerations ### Before Running diff --git a/.claude/skills/erigon-run/SKILL.md b/.claude/skills/erigon-run/SKILL.md index 35e61632..3f51ca63 100644 --- a/.claude/skills/erigon-run/SKILL.md +++ b/.claude/skills/erigon-run/SKILL.md @@ -31,6 +31,11 @@ cd && ./build/bin/erigon --datadir= --http.port cd && ./build/bin/erigon --datadir= --ws ``` +### GraphQL support +```bash +cd && ./build/bin/erigon --datadir= --graphql +``` + ## Important Considerations ### Before Running From b2d620a724ec303798c67de79b961ca159959ed1 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Sun, 15 Feb 2026 00:41:05 +0100 Subject: [PATCH 69/87] Fix all lint issues in internal/tools and internal/eth Co-Authored-By: Claude Opus 4.6 --- internal/eth/receipt.go | 12 ++++++++---- internal/tools/empty_blocks.go | 10 +++++----- internal/tools/replay_request.go | 8 ++++---- internal/tools/replay_tx.go | 4 ++-- internal/tools/scan_block_receipts.go | 4 ++-- 5 files changed, 21 insertions(+), 17 deletions(-) diff --git a/internal/eth/receipt.go b/internal/eth/receipt.go index e08d0677..f64df567 
100644 --- a/internal/eth/receipt.go +++ b/internal/eth/receipt.go @@ -101,13 +101,13 @@ func encodeLog(logMap map[string]any) ([]byte, error) { topicsRaw, _ := logMap["topics"].([]any) data, _ := logMap["data"].(string) - var items [][]byte + items := make([][]byte, 0, 3) // address items = append(items, rlpEncodeBytes(hexToBytes(address))) // topics - var topicItems [][]byte + topicItems := make([][]byte, 0, len(topicsRaw)) for _, t := range topicsRaw { topicStr, _ := t.(string) topicItems = append(topicItems, rlpEncodeBytes(hexToBytes(topicStr))) @@ -146,7 +146,11 @@ func rlpEncodeBytes(b []byte) []byte { } func rlpEncodeListFromRLP(rlpItems [][]byte) []byte { - var payload []byte + totalLen := 0 + for _, item := range rlpItems { + totalLen += len(item) + } + payload := make([]byte, 0, totalLen) for _, item := range rlpItems { payload = append(payload, item...) } @@ -208,7 +212,7 @@ func keccak256(data []byte) []byte { // mpt is a simple implementation of Ethereum's Modified Merkle-Patricia Trie // sufficient for computing root hashes of receipt tries. 
type mpt struct { - db map[string][]byte + db map[string][]byte root []byte } diff --git a/internal/tools/empty_blocks.go b/internal/tools/empty_blocks.go index 1b41b686..11014280 100644 --- a/internal/tools/empty_blocks.go +++ b/internal/tools/empty_blocks.go @@ -42,12 +42,12 @@ var emptyBlocksCommand = &cli.Command{ } type blockInfo struct { - Number uint64 - Transactions []any - Withdrawals []any + Number uint64 + Transactions []any + Withdrawals []any HasWithdrawals bool - StateRoot string - ParentHash string + StateRoot string + ParentHash string } func runEmptyBlocks(c *cli.Context) error { diff --git a/internal/tools/replay_request.go b/internal/tools/replay_request.go index 06d0fb17..fbd05c91 100644 --- a/internal/tools/replay_request.go +++ b/internal/tools/replay_request.go @@ -192,9 +192,9 @@ func findJSONRPCRequest(logDir, method string, methodIndex int, verbose bool) (s continue } - for _, line := range strings.Split(string(data), "\n") { - reqIdx := strings.Index(line, "REQ -> ") - if reqIdx == -1 { + for line := range strings.SplitSeq(string(data), "\n") { + _, reqBody, found := strings.Cut(line, "REQ -> ") + if !found { continue } @@ -212,7 +212,7 @@ func findJSONRPCRequest(logDir, method string, methodIndex int, verbose bool) (s methodCount++ if methodCount == methodIndex { - return line[reqIdx+len("REQ -> "):], nil + return reqBody, nil } } } diff --git a/internal/tools/replay_tx.go b/internal/tools/replay_tx.go index ddc7894a..809457ac 100644 --- a/internal/tools/replay_tx.go +++ b/internal/tools/replay_tx.go @@ -14,9 +14,9 @@ import ( ) const ( - silkTarget = "127.0.0.1:51515" + silkTarget = "127.0.0.1:51515" rpcdaemonTarget = "localhost:8545" - outputDir = "./output/" + outputDir = "./output/" ) var replayTxCommand = &cli.Command{ diff --git a/internal/tools/scan_block_receipts.go b/internal/tools/scan_block_receipts.go index e8cdc79c..59936df9 100644 --- a/internal/tools/scan_block_receipts.go +++ b/internal/tools/scan_block_receipts.go @@ 
-98,7 +98,7 @@ func scanReceiptsRange(ctx context.Context, client *rpc.Client, target string, s for blockNum := start; blockNum <= end; blockNum++ { if ctx.Err() != nil { log.Printf("Scan terminated by user.") - return nil + return nil //nolint:nilerr // graceful shutdown on signal } if err := verifyReceiptsRoot(ctx, client, target, blockNum); err != nil { @@ -217,7 +217,7 @@ func scanReceiptsBeyondLatest(ctx context.Context, client *rpc.Client, target st sleepCtx(ctx, interval) } if ctx.Err() != nil { - break + return nil //nolint:nilerr // graceful shutdown on signal } if err := verifyBlockReceipts(ctx, client, target, nextBlock, reorgDetected); err != nil { From 35babe6382359ee63a7b40239167c6797ed70ace Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Sun, 15 Feb 2026 00:50:36 +0100 Subject: [PATCH 70/87] Enable errcheck for os.Remove/RemoveAll and fix all unhandled errors Remove std-error-handling preset and os.Remove* exclusions from golangci config so unhandled file operation errors are flagged. Add explicit Close/Flush exclusion rule and set max-same-issues to 0 so no warnings are hidden. 
Co-Authored-By: Claude Opus 4.6 --- .golangci.yml | 13 +++++++++---- internal/compare/comparator.go | 8 ++++---- internal/runner/runner.go | 4 ++-- internal/tools/graphql.go | 8 ++++---- internal/tools/replay_tx.go | 8 ++++---- 5 files changed, 23 insertions(+), 18 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 041f093b..0640398c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -42,9 +42,6 @@ linters: - (*bufio.Writer).Write - (net/http.ResponseWriter).Write - (*encoding/json.Encoder).Encode - - os.Remove - - os.RemoveAll - - os.MkdirAll govet: enable-all: true disable: @@ -61,14 +58,18 @@ linters: exclusions: presets: - comments - - std-error-handling rules: # Test files: relax errcheck - linters: [errcheck] path: _test\.go + # defer Close/Flush: universally safe to ignore + - linters: [errcheck] + text: "Error return value of .*(Close|Flush).* is not checked" # bodyclose false positives on websocket dial and raw HTTP handler - path: internal/rpc/websocket\.go linters: [bodyclose] + - path: internal/rpc/wsconn\.go + linters: [bodyclose] - path: internal/rpc/http\.go text: "response body must be closed" linters: [bodyclose] @@ -78,6 +79,10 @@ formatters: - gofmt - goimports +issues: + max-same-issues: 0 + max-issues-per-linter: 0 + output: sort-order: - file diff --git a/internal/compare/comparator.go b/internal/compare/comparator.go index 9f03f1de..56551419 100644 --- a/internal/compare/comparator.go +++ b/internal/compare/comparator.go @@ -16,8 +16,8 @@ import ( "github.com/josephburnett/jd/v2" jsoniter "github.com/json-iterator/go" - "github.com/erigontech/rpc-tests/internal/jsondiff" "github.com/erigontech/rpc-tests/internal/config" + "github.com/erigontech/rpc-tests/internal/jsondiff" "github.com/erigontech/rpc-tests/internal/testdata" ) @@ -180,9 +180,9 @@ func ProcessResponse( } if same && !cfg.ForceDumpJSONs { - os.Remove(daemonFile) - os.Remove(expRspFile) - os.Remove(diffFile) + _ = os.Remove(daemonFile) + _ = os.Remove(expRspFile) + _ = 
os.Remove(diffFile) } outcome.Success = same diff --git a/internal/runner/runner.go b/internal/runner/runner.go index 2a78874b..095799da 100644 --- a/internal/runner/runner.go +++ b/internal/runner/runner.go @@ -254,13 +254,13 @@ done: } subfolder := fmt.Sprintf("%s/%s", cfg.OutputDir, entry.Name()) if subEntries, err := os.ReadDir(subfolder); err == nil && len(subEntries) == 0 { - os.Remove(subfolder) + _ = os.Remove(subfolder) } } } // Clean temp dir - os.RemoveAll(config.TempDirName) + _ = os.RemoveAll(config.TempDirName) // Print summary elapsed := time.Since(startTime) diff --git a/internal/tools/graphql.go b/internal/tools/graphql.go index 78708062..a6453337 100644 --- a/internal/tools/graphql.go +++ b/internal/tools/graphql.go @@ -107,7 +107,7 @@ func executeGraphQLTests(client *http.Client, httpURL, testsURL string, stopAtEr } defer func() { log.Printf("Cleaning up temporary directory: %s", tempDir) - os.RemoveAll(tempDir) + _ = os.RemoveAll(tempDir) }() log.Printf("Starting test execution using files from %s", tempDir) @@ -275,20 +275,20 @@ func downloadGitHubDirectory(client *http.Client, treeURL string) (string, error resp, err := client.Get(apiURL) if err != nil { - os.RemoveAll(tempDir) + _ = os.RemoveAll(tempDir) return "", fmt.Errorf("fetch GitHub API: %w", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - os.RemoveAll(tempDir) + _ = os.RemoveAll(tempDir) body, _ := io.ReadAll(resp.Body) return "", fmt.Errorf("GitHub API error %d: %s", resp.StatusCode, string(body[:min(len(body), 100)])) } var contents []githubContent if err := json.NewDecoder(resp.Body).Decode(&contents); err != nil { - os.RemoveAll(tempDir) + _ = os.RemoveAll(tempDir) return "", fmt.Errorf("decode GitHub API response: %w", err) } diff --git a/internal/tools/replay_tx.go b/internal/tools/replay_tx.go index 809457ac..c0a73050 100644 --- a/internal/tools/replay_tx.go +++ b/internal/tools/replay_tx.go @@ -75,7 +75,7 @@ func runReplayTx(c *cli.Context) error { 
log.Printf("Starting scans from: %d tx-index: %d", startBlock, startTx) // Clean and recreate output directory - os.RemoveAll(outputDir) + _ = os.RemoveAll(outputDir) if err := os.MkdirAll(outputDir, 0755); err != nil { return fmt.Errorf("create output dir: %w", err) } @@ -185,8 +185,8 @@ func compareTxResponses(ctx context.Context, client *rpc.Client, makeRequest req } // Clean up if no diff - os.Remove(silkFilename) - os.Remove(rpcdaemonFilename) - os.Remove(diffFilename) + _ = os.Remove(silkFilename) + _ = os.Remove(rpcdaemonFilename) + _ = os.Remove(diffFilename) return 0 } From 66a7eeafefde7de8b43bd745040f167834f819ad Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Mon, 16 Feb 2026 18:44:00 +0100 Subject: [PATCH 71/87] remove metadata section in response files --- .../test_03.json | 9 +- .../test_04.json | 9 +- .../test_10.json | 9 +- .../test_11.json | 9 +- .../test_18.json | 9 +- .../mainnet/eth_createAccessList/test_08.json | 8 +- .../mainnet/eth_createAccessList/test_09.json | 8 +- .../mainnet/eth_createAccessList/test_17.json | 9 +- .../mainnet/eth_createAccessList/test_18.json | 52 +++++------- .../mainnet/eth_createAccessList/test_19.json | 53 ++++++------ .../mainnet/eth_createAccessList/test_20.json | 54 ++++++------ .../mainnet/eth_createAccessList/test_22.json | 84 +++++++++---------- 12 files changed, 123 insertions(+), 190 deletions(-) diff --git a/integration/mainnet/debug_getModifiedAccountsByNumber/test_03.json b/integration/mainnet/debug_getModifiedAccountsByNumber/test_03.json index 993995e8..4cbc8b4a 100644 --- a/integration/mainnet/debug_getModifiedAccountsByNumber/test_03.json +++ b/integration/mainnet/debug_getModifiedAccountsByNumber/test_03.json @@ -3,12 +3,7 @@ "test": { "id": "debug_getModifiedAccountsByNumber_6002534_6002536", "reference": "", - "description": "modified accounts between block 6002534 and 6002536", - "metadata": { - "response": { - "pathOptions": [{"@": ["result"], "^": 
["SET"]}] - } - } + "description": "modified accounts between block 6002534 and 6002536" }, "request": { "id": 1, @@ -239,4 +234,4 @@ ] } } -] \ No newline at end of file +] diff --git a/integration/mainnet/debug_getModifiedAccountsByNumber/test_04.json b/integration/mainnet/debug_getModifiedAccountsByNumber/test_04.json index 73b98594..0f6aeab1 100644 --- a/integration/mainnet/debug_getModifiedAccountsByNumber/test_04.json +++ b/integration/mainnet/debug_getModifiedAccountsByNumber/test_04.json @@ -3,12 +3,7 @@ "test": { "id": "debug_getModifiedAccountsByNumber_6302128_6302130", "reference": "", - "description": "modified accounts between block 6302128 and 6302130", - "metadata": { - "response": { - "pathOptions": [{"@": ["result"], "^": ["SET"]}] - } - } + "description": "modified accounts between block 6302128 and 6302130" }, "request": { "id": 1, @@ -130,4 +125,4 @@ ] } } -] \ No newline at end of file +] diff --git a/integration/mainnet/debug_getModifiedAccountsByNumber/test_10.json b/integration/mainnet/debug_getModifiedAccountsByNumber/test_10.json index 1343eb79..f0cdf123 100644 --- a/integration/mainnet/debug_getModifiedAccountsByNumber/test_10.json +++ b/integration/mainnet/debug_getModifiedAccountsByNumber/test_10.json @@ -3,12 +3,7 @@ "test": { "id": "debug_getModifiedAccountsByNumber_6371578_6371580", "reference": "", - "description": "modified accounts between block 6371578 and 6371580", - "metadata": { - "response": { - "pathOptions": [{"@": ["result"], "^": ["SET"]}] - } - } + "description": "modified accounts between block 6371578 and 6371580" }, "request": { "id": 1, @@ -205,4 +200,4 @@ ] } } -] \ No newline at end of file +] diff --git a/integration/mainnet/debug_getModifiedAccountsByNumber/test_11.json b/integration/mainnet/debug_getModifiedAccountsByNumber/test_11.json index 5c6012d3..757506df 100644 --- a/integration/mainnet/debug_getModifiedAccountsByNumber/test_11.json +++ b/integration/mainnet/debug_getModifiedAccountsByNumber/test_11.json 
@@ -3,12 +3,7 @@ "test": { "id": "debug_getModifiedAccountsByNumber_1704000_1704002", "reference": "", - "description": "modified accounts between block 1704000 and 1704002", - "metadata": { - "response": { - "pathOptions": [{"@": ["result"], "^": ["SET"]}] - } - } + "description": "modified accounts between block 1704000 and 1704002" }, "request": { "id": 1, @@ -33,4 +28,4 @@ ] } } -] \ No newline at end of file +] diff --git a/integration/mainnet/debug_getModifiedAccountsByNumber/test_18.json b/integration/mainnet/debug_getModifiedAccountsByNumber/test_18.json index 804d07c9..b79db3c8 100644 --- a/integration/mainnet/debug_getModifiedAccountsByNumber/test_18.json +++ b/integration/mainnet/debug_getModifiedAccountsByNumber/test_18.json @@ -3,12 +3,7 @@ "test": { "id": "debug_getModifiedAccountsByNumber_17000000_17000001", "reference": "", - "description": "modified accounts between block 17000000 and 17000001", - "metadata": { - "response": { - "pathOptions": [{"@": ["result"], "^": ["SET"]}] - } - } + "description": "modified accounts between block 17000000 and 17000001" }, "request": { "id": 1, @@ -265,4 +260,4 @@ ] } } -] \ No newline at end of file +] diff --git a/integration/mainnet/eth_createAccessList/test_08.json b/integration/mainnet/eth_createAccessList/test_08.json index eb6794ef..cbdf7539 100644 --- a/integration/mainnet/eth_createAccessList/test_08.json +++ b/integration/mainnet/eth_createAccessList/test_08.json @@ -3,12 +3,7 @@ "test": { "id": "eth_createAccessList_at_block_12000000_approve", "reference": "https://etherscan.io/tx/0xdea8cf208ab5a75e25d168115f85ed5e612ad978b9b8ccc462f69feca5a9a6db", - "description": "one access list entry at block 12000000", - "metadata": { - "response": { - "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": ["SET"]}] - } - } + "description": "one access list entry at block 12000000" }, "request": { "jsonrpc": "2.0", @@ -51,4 +46,3 @@ } } ] - diff --git 
a/integration/mainnet/eth_createAccessList/test_09.json b/integration/mainnet/eth_createAccessList/test_09.json index 6375ebf0..02bd0551 100644 --- a/integration/mainnet/eth_createAccessList/test_09.json +++ b/integration/mainnet/eth_createAccessList/test_09.json @@ -3,12 +3,7 @@ "test": { "id": "eth_createAccessList_at_block_12000000_revert", "reference": "https://etherscan.io/tx/0xdea8cf208ab5a75e25d168115f85ed5e612ad978b9b8ccc462f69feca5a9a6db", - "description": "no access list entry at block 12000000", - "metadata": { - "response": { - "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": ["SET"]}] - } - } + "description": "no access list entry at block 12000000" }, "request": { "jsonrpc": "2.0", @@ -56,4 +51,3 @@ } } ] - diff --git a/integration/mainnet/eth_createAccessList/test_17.json b/integration/mainnet/eth_createAccessList/test_17.json index 3dec4bc3..935b8566 100644 --- a/integration/mainnet/eth_createAccessList/test_17.json +++ b/integration/mainnet/eth_createAccessList/test_17.json @@ -3,12 +3,7 @@ "test": { "id": "eth_createAccessList_at_block_130572_contract_deploy_and_access_list_with_to", "reference": "https://etherscan.io/tx/0x4ee2bbfbaf90afef640c6ddacde696e6c4b81f9f644fc8c78670b910f922236b", - "description": "at block 130572 with contract deploy and access list with to", - "metadata": { - "response": { - "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": ["SET"]}] - } - } + "description": "at block 130572 with contract deploy and access list with to" }, "request": { "jsonrpc": "2.0", @@ -66,4 +61,4 @@ } } } -] \ No newline at end of file +] diff --git a/integration/mainnet/eth_createAccessList/test_18.json b/integration/mainnet/eth_createAccessList/test_18.json index 7193bf20..bb2bf5ae 100644 --- a/integration/mainnet/eth_createAccessList/test_18.json +++ b/integration/mainnet/eth_createAccessList/test_18.json @@ -1,33 +1,27 @@ [ - { - "test": { - "reference": 
"https://etherscan.io/tx/0x4ee2bbfbaf90afef640c6ddacde696e6c4b81f9f644fc8c78670b910f922236b", - "description": "with contract deploy and accessList with to", - "metadata": { - "response": { - "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": ["SET"]}] - } - } + { + "test": { + "reference": "https://etherscan.io/tx/0x4ee2bbfbaf90afef640c6ddacde696e6c4b81f9f644fc8c78670b910f922236b", + "description": "with contract deploy and accessList with to" + }, + "request": { + "jsonrpc": "2.0", + "method": "eth_createAccessList", + "params": [ + { + "from": "0x3D0768da09CE77d25e2d998E6a7b6eD4b9116c2D", + "to": "0x630ea66c8c5dc205d45a978573fa86df5af1fe7a", + "gas": "0x23685", + "gasPrice": "0xDB5AAA975", + "data": "0x60606040526040516102b43803806102b48339016040526060805160600190602001505b5b33600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b806001600050908051906020019082805482825590600052602060002090601f01602090048101928215609e579182015b82811115609d5782518260005055916020019190600101906081565b5b50905060c5919060a9565b8082111560c1576000818150600090555060010160a9565b5090565b50505b506101dc806100d86000396000f30060606040526000357c01000000000000000000000000000000000000000000000000000000009004806341c0e1b514610044578063cfae32171461005157610042565b005b61004f6004506100ca565b005b61005c60045061015e565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100bc5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561015b57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b5b565b60206040519081016040528060008152602001506001600050805480601f0160208091040260200160405190810160405280929190818152602001828054801561
01cd57820191906000526020600020905b8154815290600101906020018083116101b057829003601f168201915b505050505090506101d9565b90560000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000c48656c6c6f20576f726c64210000000000000000000000000000000000000000" }, - "request": { - "jsonrpc":"2.0", - "method":"eth_createAccessList", - "params":[ - { - "from": "0x3D0768da09CE77d25e2d998E6a7b6eD4b9116c2D", - "to": "0x630ea66c8c5dc205d45a978573fa86df5af1fe7a", - "gas": "0x23685", - "gasPrice": "0xDB5AAA975", - "data": "0x60606040526040516102b43803806102b48339016040526060805160600190602001505b5b33600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b806001600050908051906020019082805482825590600052602060002090601f01602090048101928215609e579182015b82811115609d5782518260005055916020019190600101906081565b5b50905060c5919060a9565b8082111560c1576000818150600090555060010160a9565b5090565b50505b506101dc806100d86000396000f30060606040526000357c01000000000000000000000000000000000000000000000000000000009004806341c0e1b514610044578063cfae32171461005157610042565b005b61004f6004506100ca565b005b61005c60045061015e565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100bc5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561015b57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b5b565b60206040519081016040528060008152602001506001600050805480601f016020809104026020016040519081016040528092919081815260200182805480156101cd57820191906000526020600020905b8154815290600101906020018083116101b057829003601f168201915b505050505090506101d9565b905600000000000000000000000000000000000000000
00000000000000000000020000000000000000000000000000000000000000000000000000000000000000c48656c6c6f20576f726c64210000000000000000000000000000000000000000" - }, - "latest" - ], - "id":1 - }, - "response": { - "id":1, - "jsonrpc":"2.0" - } + "latest" + ], + "id": 1 + }, + "response": { + "id": 1, + "jsonrpc": "2.0" } + } ] - diff --git a/integration/mainnet/eth_createAccessList/test_19.json b/integration/mainnet/eth_createAccessList/test_19.json index 51b4f356..6e9ff3cc 100644 --- a/integration/mainnet/eth_createAccessList/test_19.json +++ b/integration/mainnet/eth_createAccessList/test_19.json @@ -1,33 +1,28 @@ [ - { - "test": { - "reference": "https://etherscan.io/tx/0x3d63e943b1d979968db3da08a9a0d355f71af46ec3481ce313afbad733f77776", - "description": "1 access list entry", - "metadata": { - "response": { - "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": ["SET"]}] - } - } + { + "test": { + "reference": "https://etherscan.io/tx/0x3d63e943b1d979968db3da08a9a0d355f71af46ec3481ce313afbad733f77776", + "description": "1 access list entry" + }, + "request": { + "jsonrpc": "2.0", + "method": "eth_createAccessList", + "params": [ + { + "from": "0x244A93D0f7F27b845060abEd22B23aD914C71B54", + "to": "0xcd4EC7b66fbc029C116BA9Ffb3e59351c20B5B06", + "gas": "0xE234", + "gasPrice": "0x1D91CA3600", + "data": "0xca120b1f000000000000000000000000244a93d0f7f27b845060abed22b23ad914c71b54eaafa8798f3f2c657b5f86f543682ec694db0344b923fe7a508c522d22938945000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000087df2103941187dc230eab1a148a587cb8a03afb00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000002" }, - "request": { - "jsonrpc":"2.0", - "method":"eth_createAccessList", - "params":[ - { - "from": 
"0x244A93D0f7F27b845060abEd22B23aD914C71B54", - "to": "0xcd4EC7b66fbc029C116BA9Ffb3e59351c20B5B06", - "gas": "0xE234", - "gasPrice": "0x1D91CA3600", - "data": "0xca120b1f000000000000000000000000244a93d0f7f27b845060abed22b23ad914c71b54eaafa8798f3f2c657b5f86f543682ec694db0344b923fe7a508c522d22938945000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000087df2103941187dc230eab1a148a587cb8a03afb00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000002" - }, - "latest" - ], - "id":1 - }, - "response": { - "id":1, - "jsonrpc":"2.0" - } + "latest" + ], + "id": 1 + }, + "response": { + "id": 1, + "jsonrpc": "2.0", + "result": null } + } ] - diff --git a/integration/mainnet/eth_createAccessList/test_20.json b/integration/mainnet/eth_createAccessList/test_20.json index 38ed1532..b2f261fb 100644 --- a/integration/mainnet/eth_createAccessList/test_20.json +++ b/integration/mainnet/eth_createAccessList/test_20.json @@ -1,34 +1,28 @@ [ - { - "test": { - "reference": "https://etherscan.io/tx/0x3d63e943b1d979968db3da08a9a0d355f71af46ec3481ce313afbad733f77776", - "description": "1 access list entry", - "metadata": { - "response": { - "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": ["SET"]}] - } - } + { + "test": { + "reference": "https://etherscan.io/tx/0x3d63e943b1d979968db3da08a9a0d355f71af46ec3481ce313afbad733f77776", + "description": "1 access list entry" + }, + "request": { + "jsonrpc": "2.0", + "method": "eth_createAccessList", + "params": [ + { + "from": "0xa9Ac1233699BDae25abeBae4f9Fb54DbB1b44700", + "to": "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48", + "gas": "0xC2A005", + "gasPrice": "0x8000BEFC0", + "data": 
"0x095ea7b3000000000000000000000000a87eaf82f287a2c67cb74130906d5ac01f2f925100000000000000000000000000000000000000000000000000000000126af740" }, - "request": { - "jsonrpc":"2.0", - "method":"eth_createAccessList", - "params":[ - { - "from": "0xa9Ac1233699BDae25abeBae4f9Fb54DbB1b44700", - "to": "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48", - "gas": "0xC2A005", - "gasPrice": "0x8000BEFC0", - "data": "0x095ea7b3000000000000000000000000a87eaf82f287a2c67cb74130906d5ac01f2f925100000000000000000000000000000000000000000000000000000000126af740" - - }, - "latest" - ], - "id":1 - }, - "response": { - "id":1, - "jsonrpc":"2.0" - } + "latest" + ], + "id": 1 + }, + "response": { + "id": 1, + "jsonrpc": "2.0", + "result": null } + } ] - diff --git a/integration/mainnet/eth_createAccessList/test_22.json b/integration/mainnet/eth_createAccessList/test_22.json index 849ae1e7..0f5f72ba 100644 --- a/integration/mainnet/eth_createAccessList/test_22.json +++ b/integration/mainnet/eth_createAccessList/test_22.json @@ -1,51 +1,43 @@ [ - { - "test": { - "reference": "", - "description": "access list with state Override", - "metadata": { - "response": { - "pathOptions": [{"@": ["result", "accessList", {}, "storageKeys"], "^": ["SET"]}] - } - } + { + "test": { + "reference": "", + "description": "access list with state Override" + }, + "request": { + "jsonrpc": "2.0", + "method": "eth_createAccessList", + "params": [ + { + "from": "0xF21079d225F4f3e7FDd3E258042a55cED651b2a1", + "to": "0xa9D53f7B4836a595db7E11A7f92F9EF3810E04B6", + "data": "0x46c474a60000000000000000000000000000000000000000000000000000000000bc614e00000000000000000000000000000000000000000000000000000000000f455a000000000000000000000000125f4b6650e205a987ba92972a87c833dcf5a84700000000000000000000000000000000000000000000000000000000000f455a" }, - "request": { - "jsonrpc":"2.0", - "method":"eth_createAccessList", - "params":[ - { - "from": "0xF21079d225F4f3e7FDd3E258042a55cED651b2a1", - "to": 
"0xa9D53f7B4836a595db7E11A7f92F9EF3810E04B6", - "data": "0x46c474a60000000000000000000000000000000000000000000000000000000000bc614e00000000000000000000000000000000000000000000000000000000000f455a000000000000000000000000125f4b6650e205a987ba92972a87c833dcf5a84700000000000000000000000000000000000000000000000000000000000f455a" - }, - - "latest", - { - "0xdAC17F958D2ee523a2206206994597C13D831ec7": { - "stateDiff": { - "0xe8668fbdcb49eea01f71c55f2c539b0953b137a92283519b2e33d48a03e1e3e7": "0x000000000000000000000000000000000000000000000000000000000098a0a8", - "0xa2d1bb9934928c34b005700965f54467eef400cea7fd9fbfc0d1d1eb2184eee8": "0x000000000000000000000000000000000000000000000000000000000098a0a8" - } - }, - "0xa9D53f7B4836a595db7E11A7f92F9EF3810E04B6": { - "stateDiff": { - "0xe8668fbdcb49eea01f71c55f2c539b0953b137a92283519b2e33d48a03e1e3e7": "0x000000000000000000000000000000000000000000000000000000000098a0a8", - "0xa2d1bb9934928c34b005700965f54467eef400cea7fd9fbfc0d1d1eb2184eee8": "0x000000000000000000000000000000000000000000000000000000000098a0a8" - } - }, - "0xF21079d225F4f3e7FDd3E258042a55cED651b2a1": { - "nonce": "0x1", - "balance": "0x1000A386f26fc10000" - } - } - ], - "id":1 - }, - "response": { - "id":1, - "jsonrpc":"2.0" + "latest", + { + "0xdAC17F958D2ee523a2206206994597C13D831ec7": { + "stateDiff": { + "0xe8668fbdcb49eea01f71c55f2c539b0953b137a92283519b2e33d48a03e1e3e7": "0x000000000000000000000000000000000000000000000000000000000098a0a8", + "0xa2d1bb9934928c34b005700965f54467eef400cea7fd9fbfc0d1d1eb2184eee8": "0x000000000000000000000000000000000000000000000000000000000098a0a8" + } + }, + "0xa9D53f7B4836a595db7E11A7f92F9EF3810E04B6": { + "stateDiff": { + "0xe8668fbdcb49eea01f71c55f2c539b0953b137a92283519b2e33d48a03e1e3e7": "0x000000000000000000000000000000000000000000000000000000000098a0a8", + "0xa2d1bb9934928c34b005700965f54467eef400cea7fd9fbfc0d1d1eb2184eee8": "0x000000000000000000000000000000000000000000000000000000000098a0a8" + } + }, + 
"0xF21079d225F4f3e7FDd3E258042a55cED651b2a1": { + "nonce": "0x1", + "balance": "0x1000A386f26fc10000" + } } + ], + "id": 1 + }, + "response": { + "id": 1, + "jsonrpc": "2.0" } + } ] - - From cfdfb2fafab47cbb2d008a1cde159e16a43e45a0 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Mon, 16 Feb 2026 19:22:52 +0100 Subject: [PATCH 72/87] fix after rebase --- integration/run_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/run_tests.py b/integration/run_tests.py index b9f4b6ec..70ed7cb7 100755 --- a/integration/run_tests.py +++ b/integration/run_tests.py @@ -1169,7 +1169,7 @@ def main(argv) -> int: print(f"Total unmarshalling time: {str(total_unmarshalling_time)}") print(f"No of json Diffs: {str(no_of_json_diffs)}") print(f"Test time-elapsed: {str(elapsed)}") - print(f"Available tests: {global_test_number - 1}") + print(f"Available tests: {global_test_number}") print(f"Available tested api: {available_tested_apis}") print(f"Number of loop: {test_rep + 1}") print(f"Number of executed tests: {executed_tests}") From 06c690bd60312346357af165ffb9778016586bfc Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Mon, 16 Feb 2026 21:14:06 +0100 Subject: [PATCH 73/87] fix exp rsp --- integration/mainnet/eth_createAccessList/test_19.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/integration/mainnet/eth_createAccessList/test_19.json b/integration/mainnet/eth_createAccessList/test_19.json index 6e9ff3cc..b00c79e4 100644 --- a/integration/mainnet/eth_createAccessList/test_19.json +++ b/integration/mainnet/eth_createAccessList/test_19.json @@ -21,8 +21,7 @@ }, "response": { "id": 1, - "jsonrpc": "2.0", - "result": null + "jsonrpc": "2.0" } } ] From dcd6aea7f4fbbab33841c19f1a9ae1daf224b683 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Tue, 17 Feb 2026 12:24:42 +0100 Subject: [PATCH 74/87] Remove 
JdLibrary diff mode and josephburnett/jd/v2 dependency JsonDiffGo handles unordered array comparison natively via SortArrays, eliminating the need for metadata.response.pathOptions in test fixtures and removing an external dependency. Co-Authored-By: Claude Opus 4.6 --- CLAUDE.md | 2 +- go.mod | 5 +- go.sum | 15 ------ internal/compare/comparator.go | 63 +---------------------- internal/compare/comparator_bench_test.go | 6 +-- internal/compare/comparator_test.go | 24 +++------ internal/config/config.go | 9 ++-- internal/config/config_test.go | 7 +-- internal/runner/executor.go | 4 +- internal/testdata/types.go | 20 ++----- 10 files changed, 25 insertions(+), 130 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 0eb56d7f..05faba24 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -105,7 +105,7 @@ Test data lives in `integration/{network}/{api_name}/test_NN.json` across networ ## Dependencies -Go 1.24. Key libraries: `gorilla/websocket` (WebSocket transport), `josephburnett/jd/v2` (JSON diffing), `tsenart/vegeta/v12` (load testing), `urfave/cli/v2` (CLI framework for subcommands), `golang-jwt/jwt/v5` (JWT auth), `dsnet/compress` (bzip2), `golang.org/x/crypto` (Keccak256 for MPT). +Go 1.24. Key libraries: `gorilla/websocket` (WebSocket transport), `tsenart/vegeta/v12` (load testing), `urfave/cli/v2` (CLI framework for subcommands), `golang-jwt/jwt/v5` (JWT auth), `dsnet/compress` (bzip2), `golang.org/x/crypto` (Keccak256 for MPT). **Constraint: `github.com/ethereum/go-ethereum` must NOT be added as a dependency.** Ethereum primitives (RLP, Keccak256, MPT) are implemented from scratch in `internal/eth/`. 
diff --git a/go.mod b/go.mod index 672be7f8..e6ccdce1 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( github.com/dsnet/compress v0.0.1 github.com/golang-jwt/jwt/v5 v5.3.0 github.com/gorilla/websocket v1.5.3 - github.com/josephburnett/jd/v2 v2.3.0 github.com/json-iterator/go v1.1.12 github.com/tsenart/vegeta/v12 v12.13.0 github.com/urfave/cli/v2 v2.27.7 @@ -17,8 +16,6 @@ require ( require ( github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/swag v0.23.0 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/influxdata/tdigest v0.0.1 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -27,11 +24,11 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/stretchr/testify v1.10.0 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect golang.org/x/net v0.49.0 // indirect golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.41.0 // indirect golang.org/x/text v0.34.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 432aaa6a..f06b5391 100644 --- a/go.sum +++ b/go.sum @@ -10,10 +10,6 @@ github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654/go.mod h1:qm+vckxRlD github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q= github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/swag v0.23.0 
h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -24,18 +20,12 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/influxdata/tdigest v0.0.1 h1:XpFptwYmnEKUqmkcDjrzffswZ3nvNeevbUSLPP/ZzIY= github.com/influxdata/tdigest v0.0.1/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= -github.com/josephburnett/jd/v2 v2.3.0 h1:AyNT0zSStJ2j28zutWDO4fkc95JoICryWQRmDTRzPTQ= -github.com/josephburnett/jd/v2 v2.3.0/go.mod h1:0I5+gbo7y8diuajJjm79AF44eqTheSJy1K7DSbIUFAQ= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= @@ -44,8 +34,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 h1:18kd+8ZUlt/ARXhljq+14TwAoKa61q6dX8jtwOf6DH8= github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -81,9 +69,6 @@ golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGm gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca h1:PupagGYwj8+I4ubCxcmcBRk3VlUWtTg5huQpZR9flmE= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= diff --git a/internal/compare/comparator.go 
b/internal/compare/comparator.go index 56551419..5b631663 100644 --- a/internal/compare/comparator.go +++ b/internal/compare/comparator.go @@ -13,7 +13,6 @@ import ( "sync" "time" - "github.com/josephburnett/jd/v2" jsoniter "github.com/json-iterator/go" "github.com/erigontech/rpc-tests/internal/config" @@ -41,7 +40,6 @@ const ( func ProcessResponse( response, referenceResponse, responseInFile any, cfg *config.Config, - cmd *testdata.JsonRpcCommand, outputDir, daemonFile, expRspFile, diffFile string, outcome *testdata.TestOutcome, ) { @@ -172,7 +170,7 @@ func ProcessResponse( } } } else { - same, err = compareJSON(cfg, cmd, daemonFile, expRspFile, diffFile, &outcome.Metrics) + same, err = compareJSON(cfg, daemonFile, expRspFile, diffFile, &outcome.Metrics) if err != nil { outcome.Error = err return @@ -311,12 +309,10 @@ func dumpJSONs(dump bool, daemonFile, expRspFile, outputDir string, response, ex } // compareJSON dispatches to the appropriate external diff tool. -func compareJSON(cfg *config.Config, cmd *testdata.JsonRpcCommand, daemonFile, expRspFile, diffFile string, metrics *testdata.TestMetrics) (bool, error) { +func compareJSON(cfg *config.Config, daemonFile, expRspFile, diffFile string, metrics *testdata.TestMetrics) (bool, error) { metrics.ComparisonCount++ switch cfg.DiffKind { - case config.JdLibrary: - return runCompareJD(cmd, expRspFile, daemonFile, diffFile) case config.JsonDiffTool: return runExternalCompare(true, "/dev/null", expRspFile, daemonFile, diffFile) case config.DiffTool: @@ -326,61 +322,6 @@ func compareJSON(cfg *config.Config, cmd *testdata.JsonRpcCommand, daemonFile, e } } -// runCompareJD uses the JD library for comparison, with 30s timeout and pathOptions support. 
-func runCompareJD(cmd *testdata.JsonRpcCommand, file1, file2, diffFile string) (bool, error) { - node1, err := jd.ReadJsonFile(file1) - if err != nil { - return false, err - } - node2, err := jd.ReadJsonFile(file2) - if err != nil { - return false, err - } - - type result struct { - diff jd.Diff - err error - } - - resChan := make(chan result, 1) - ctx, cancel := context.WithTimeout(context.Background(), externalToolTimeout) - defer cancel() - - go func() { - var d jd.Diff - if cmd.TestInfo != nil && cmd.TestInfo.Metadata != nil && cmd.TestInfo.Metadata.Response != nil && cmd.TestInfo.Metadata.Response.PathOptions != nil { - options, err := jd.ReadOptionsString(string(cmd.TestInfo.Metadata.Response.PathOptions)) - if err != nil { - resChan <- result{err: err} - return - } - d = node1.Diff(node2, options...) - } else { - d = node1.Diff(node2) - } - resChan <- result{diff: d} - }() - - select { - case <-ctx.Done(): - return false, fmt.Errorf("JSON diff (JD) timeout for files %s and %s", file1, file2) - case res := <-resChan: - if res.err != nil { - return false, res.err - } - diffString := res.diff.Render() - if err := os.WriteFile(diffFile, []byte(diffString), 0644); err != nil { - return false, err - } - // Check if diff file is empty (no differences) - info, err := os.Stat(diffFile) - if err != nil { - return false, err - } - return info.Size() == 0, nil - } -} - // runExternalCompare runs json-diff or diff as an external process with timeout. 
func runExternalCompare(useJsonDiff bool, errorFile, file1, file2, diffFile string) (bool, error) { var cmdStr string diff --git a/internal/compare/comparator_bench_test.go b/internal/compare/comparator_bench_test.go index 21ca8ea7..9e7f16f9 100644 --- a/internal/compare/comparator_bench_test.go +++ b/internal/compare/comparator_bench_test.go @@ -53,8 +53,7 @@ func BenchmarkProcessResponse_ExactMatch(b *testing.B) { b.ResetTimer() for b.Loop() { outcome := &testdata.TestOutcome{} - cmd := &testdata.JsonRpcCommand{} - ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + ProcessResponse(response, nil, expected, cfg, dir, "", "", "", outcome) } } @@ -73,8 +72,7 @@ func BenchmarkProcessResponse_DiffMismatch_JsonDiffGo(b *testing.B) { b.ResetTimer() for b.Loop() { outcome := &testdata.TestOutcome{} - cmd := &testdata.JsonRpcCommand{} - ProcessResponse(response, nil, expected, cfg, cmd, dir, daemonFile, expRspFile, diffFile, outcome) + ProcessResponse(response, nil, expected, cfg, dir, daemonFile, expRspFile, diffFile, outcome) } } diff --git a/internal/compare/comparator_test.go b/internal/compare/comparator_test.go index 895a8611..81ee95dd 100644 --- a/internal/compare/comparator_test.go +++ b/internal/compare/comparator_test.go @@ -47,11 +47,10 @@ func TestProcessResponse_WithoutCompare(t *testing.T) { cfg.WithoutCompareResults = true outcome := &testdata.TestOutcome{} - cmd := &testdata.JsonRpcCommand{} response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} - ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + ProcessResponse(response, nil, expected, cfg, dir, "", "", "", outcome) if !outcome.Success { t.Error("WithoutCompareResults should always succeed") @@ -63,11 +62,10 @@ func TestProcessResponse_ExactMatch(t *testing.T) { cfg := config.NewConfig() outcome := &testdata.TestOutcome{} - cmd := 
&testdata.JsonRpcCommand{} response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} - ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + ProcessResponse(response, nil, expected, cfg, dir, "", "", "", outcome) if !outcome.Success { t.Errorf("exact match should succeed, error: %v", outcome.Error) @@ -82,11 +80,10 @@ func TestProcessResponse_NullExpectedResult(t *testing.T) { cfg := config.NewConfig() outcome := &testdata.TestOutcome{} - cmd := &testdata.JsonRpcCommand{} response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0xabc"} expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": nil} - ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + ProcessResponse(response, nil, expected, cfg, dir, "", "", "", outcome) if !outcome.Success { t.Errorf("null expected result should be accepted, error: %v", outcome.Error) @@ -98,11 +95,10 @@ func TestProcessResponse_NullExpectedError(t *testing.T) { cfg := config.NewConfig() outcome := &testdata.TestOutcome{} - cmd := &testdata.JsonRpcCommand{} response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "error": map[string]any{"code": float64(-32000), "message": "some error"}} expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "error": nil} - ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + ProcessResponse(response, nil, expected, cfg, dir, "", "", "", outcome) if !outcome.Success { t.Errorf("null expected error should be accepted, error: %v", outcome.Error) @@ -114,11 +110,10 @@ func TestProcessResponse_EmptyExpected(t *testing.T) { cfg := config.NewConfig() outcome := &testdata.TestOutcome{} - cmd := &testdata.JsonRpcCommand{} response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} expected := map[string]any{"jsonrpc": "2.0", "id": float64(1)} - 
ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + ProcessResponse(response, nil, expected, cfg, dir, "", "", "", outcome) if !outcome.Success { t.Errorf("empty expected (just jsonrpc+id) should be accepted, error: %v", outcome.Error) @@ -131,11 +126,10 @@ func TestProcessResponse_DoNotCompareError(t *testing.T) { cfg.DoNotCompareError = true outcome := &testdata.TestOutcome{} - cmd := &testdata.JsonRpcCommand{} response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "error": map[string]any{"code": float64(-32000), "message": "err1"}} expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "error": map[string]any{"code": float64(-32001), "message": "err2"}} - ProcessResponse(response, nil, expected, cfg, cmd, dir, "", "", "", outcome) + ProcessResponse(response, nil, expected, cfg, dir, "", "", "", outcome) if !outcome.Success { t.Errorf("DoNotCompareError should accept different errors, error: %v", outcome.Error) @@ -152,11 +146,10 @@ func TestProcessResponse_DiffMismatch_JsonDiffGo(t *testing.T) { diffFile := filepath.Join(dir, "diff.json") outcome := &testdata.TestOutcome{} - cmd := &testdata.JsonRpcCommand{} response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} - ProcessResponse(response, nil, expected, cfg, cmd, dir, daemonFile, expRspFile, diffFile, outcome) + ProcessResponse(response, nil, expected, cfg, dir, daemonFile, expRspFile, diffFile, outcome) if outcome.Success { t.Error("mismatched responses should fail") @@ -177,11 +170,10 @@ func TestProcessResponse_DiffMismatch_SingleTest_HasColoredDiff(t *testing.T) { diffFile := filepath.Join(dir, "diff.json") outcome := &testdata.TestOutcome{} - cmd := &testdata.JsonRpcCommand{} response := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x1"} expected := map[string]any{"jsonrpc": "2.0", "id": float64(1), "result": "0x2"} - ProcessResponse(response, 
nil, expected, cfg, cmd, dir, daemonFile, expRspFile, diffFile, outcome) + ProcessResponse(response, nil, expected, cfg, dir, daemonFile, expRspFile, diffFile, outcome) if outcome.ColoredDiff == "" { t.Error("single test mode should produce colored diff on mismatch") diff --git a/internal/config/config.go b/internal/config/config.go index 9936a5da..7b0fd757 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -40,21 +40,18 @@ var JSON = jsoniter.ConfigCompatibleWithStandardLibrary type DiffKind int const ( - JdLibrary DiffKind = iota + JsonDiffGo DiffKind = iota JsonDiffTool DiffTool - JsonDiffGo ) func (k DiffKind) String() string { - return [...]string{"jd", "json-diff", "diff", "json-diff-go"}[k] + return [...]string{"json-diff-go", "json-diff", "diff"}[k] } // ParseDiffKind converts a string into a DiffKind enum type. func ParseDiffKind(s string) (DiffKind, error) { switch strings.ToLower(s) { - case "jd": - return JdLibrary, nil case "json-diff": return JsonDiffTool, nil case "diff": @@ -62,7 +59,7 @@ func ParseDiffKind(s string) (DiffKind, error) { case "json-diff-go": return JsonDiffGo, nil default: - return JdLibrary, fmt.Errorf("invalid DiffKind value: %s", s) + return JsonDiffGo, fmt.Errorf("invalid DiffKind value: %s", s) } } diff --git a/internal/config/config_test.go b/internal/config/config_test.go index ec847a12..889b91ba 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -194,12 +194,10 @@ func TestParseDiffKind(t *testing.T) { want DiffKind err bool }{ - {"jd", JdLibrary, false}, {"json-diff", JsonDiffTool, false}, {"diff", DiffTool, false}, {"json-diff-go", JsonDiffGo, false}, - {"JD", JdLibrary, false}, - {"invalid", JdLibrary, true}, + {"invalid", JsonDiffGo, true}, } for _, tt := range tests { @@ -218,10 +216,9 @@ func TestDiffKind_String(t *testing.T) { kind DiffKind want string }{ - {JdLibrary, "jd"}, + {JsonDiffGo, "json-diff-go"}, {JsonDiffTool, "json-diff"}, {DiffTool, "diff"}, - 
{JsonDiffGo, "json-diff-go"}, } for _, tt := range tests { diff --git a/internal/runner/executor.go b/internal/runner/executor.go index 0cada179..4eea7a3d 100644 --- a/internal/runner/executor.go +++ b/internal/runner/executor.go @@ -81,7 +81,7 @@ func runCommand(ctx context.Context, cfg *config.Config, cmd *testdata.JsonRpcCo fmt.Printf("%s: [%v]\n", cfg.DaemonUnderTest, result) } - compare.ProcessResponse(result, nil, cmd.Response, cfg, cmd, outputDirName, daemonFile, expRspFile, diffFile, outcome) + compare.ProcessResponse(result, nil, cmd.Response, cfg, outputDirName, daemonFile, expRspFile, diffFile, outcome) } else { target = cfg.GetTarget(config.DaemonOnDefaultPort, descriptor.Name) @@ -113,7 +113,7 @@ func runCommand(ctx context.Context, cfg *config.Config, cmd *testdata.JsonRpcCo daemonFile = outputAPIFilename + config.GetJSONFilenameExt(config.DaemonOnDefaultPort, target) expRspFile = outputAPIFilename + config.GetJSONFilenameExt(cfg.DaemonAsReference, target1) - compare.ProcessResponse(result, result1, nil, cfg, cmd, outputDirName, daemonFile, expRspFile, diffFile, outcome) + compare.ProcessResponse(result, result1, nil, cfg, outputDirName, daemonFile, expRspFile, diffFile, outcome) } } diff --git a/internal/testdata/types.go b/internal/testdata/types.go index 8cce9082..42d22546 100644 --- a/internal/testdata/types.go +++ b/internal/testdata/types.go @@ -45,23 +45,11 @@ type TestMetrics struct { EqualCount int } -// JsonRpcResponseMetadata holds metadata about the expected response. -type JsonRpcResponseMetadata struct { - PathOptions jsoniter.RawMessage `json:"pathOptions"` -} - -// JsonRpcTestMetadata holds metadata about the test request/response. -type JsonRpcTestMetadata struct { - Request any `json:"request"` - Response *JsonRpcResponseMetadata `json:"response"` -} - -// JsonRpcTest holds test-level information (identifier, description, metadata). +// JsonRpcTest holds test-level information (identifier, description). 
type JsonRpcTest struct { - Identifier string `json:"id"` - Reference string `json:"reference"` - Description string `json:"description"` - Metadata *JsonRpcTestMetadata `json:"metadata"` + Identifier string `json:"id"` + Reference string `json:"reference"` + Description string `json:"description"` } // JsonRpcCommand represents a single JSON-RPC command in a test fixture. From cc0369ce7e9e2338612682bae1a82dabd0b7c02d Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Tue, 17 Feb 2026 18:07:02 +0100 Subject: [PATCH 75/87] fix after rebase refactoring --- .../mainnet/eth_createAccessList/test_20.json | 3 +- internal/compare/comparator.go | 40 +++++++------------ 2 files changed, 16 insertions(+), 27 deletions(-) diff --git a/integration/mainnet/eth_createAccessList/test_20.json b/integration/mainnet/eth_createAccessList/test_20.json index b2f261fb..99f8ad6c 100644 --- a/integration/mainnet/eth_createAccessList/test_20.json +++ b/integration/mainnet/eth_createAccessList/test_20.json @@ -21,8 +21,7 @@ }, "response": { "id": 1, - "jsonrpc": "2.0", - "result": null + "jsonrpc": "2.0" } } ] diff --git a/internal/compare/comparator.go b/internal/compare/comparator.go index 5b631663..ffd2bdd7 100644 --- a/internal/compare/comparator.go +++ b/internal/compare/comparator.go @@ -134,20 +134,9 @@ func ProcessResponse( if cfg.DiffKind == config.JsonDiffGo { outcome.Metrics.ComparisonCount++ opts := &jsondiff.Options{SortArrays: true} + var expected, actual any if respIsMap && expIsMap { - diff := jsondiff.DiffJSON(expectedMap, responseMap, opts) - same = len(diff) == 0 - diffString := jsondiff.DiffString(expectedMap, responseMap, opts) - if writeErr := os.WriteFile(diffFile, []byte(diffString), 0644); writeErr != nil { - outcome.Error = writeErr - return - } - if !same { - outcome.Error = ErrDiffMismatch - if cfg.ReqTestNum != -1 { - outcome.ColoredDiff = jsondiff.ColoredString(expectedMap, responseMap, opts) - } - } + expected, actual = 
expectedMap, responseMap } else { responseArray, respIsArray := response.([]any) expectedArray, expIsArray := expectedResponse.([]any) @@ -155,18 +144,19 @@ func ProcessResponse( outcome.Error = errors.New("cannot compare JSON objects (neither maps nor arrays)") return } - diff := jsondiff.DiffJSON(expectedArray, responseArray, opts) - same = len(diff) == 0 - diffString := jsondiff.DiffString(expectedArray, responseArray, opts) - if writeErr := os.WriteFile(diffFile, []byte(diffString), 0644); writeErr != nil { - outcome.Error = writeErr - return - } - if !same { - outcome.Error = ErrDiffMismatch - if cfg.ReqTestNum != -1 { - outcome.ColoredDiff = jsondiff.ColoredString(expectedArray, responseArray, opts) - } + expected, actual = expectedArray, responseArray + } + diff := jsondiff.DiffJSON(expected, actual, opts) + same = len(diff) == 0 + diffString := jsondiff.DiffString(expected, actual, opts) + if writeErr := os.WriteFile(diffFile, []byte(diffString), 0644); writeErr != nil { + outcome.Error = writeErr + return + } + if !same { + outcome.Error = ErrDiffMismatch + if cfg.ReqTestNum != -1 { + outcome.ColoredDiff = jsondiff.ColoredString(expected, actual, opts) } } } else { From 999809d7acf7f7d62856128da58063d6912f30d3 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Tue, 17 Feb 2026 18:36:35 +0100 Subject: [PATCH 76/87] json-diff is bool --- cmd/integration/main.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 9c876ab9..61d2232a 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -87,8 +87,8 @@ func parseFlags(cfg *config.Config) error { excludeTestList := flag.String("X", "", "exclude test list") flag.StringVar(excludeTestList, "exclude-test-list", "", "exclude test list") - diffKind := flag.String("j", cfg.DiffKind.String(), "diff for JSON values") - flag.StringVar(diffKind, "json-diff", cfg.DiffKind.String(), 
"diff for JSON values") + jsonDiff := flag.Bool("j", false, "use json-diff for compare") + flag.BoolVar(jsonDiff, "json-diff", false, "use json-diff for compare") waitingTime := flag.Int("w", 0, "waiting time in milliseconds") flag.IntVar(waitingTime, "waiting-time", 0, "waiting time in milliseconds") @@ -138,11 +138,9 @@ func parseFlags(cfg *config.Config) error { cfg.MemProfile = *memProfile cfg.TraceFile = *traceFile - kind, err := config.ParseDiffKind(*diffKind) - if err != nil { - return err + if *jsonDiff { + cfg.DiffKind = config.JsonDiffTool } - cfg.DiffKind = kind if *daemonPort { cfg.DaemonUnderTest = config.DaemonOnOtherPort @@ -196,7 +194,7 @@ func usage() { fmt.Println("") fmt.Println("Options:") fmt.Println(" -h, --help print this help") - fmt.Println(" -j, --json-diff use json-diff to make compare [default use json-diff]") + fmt.Println(" -j, --json-diff use json-diff tool to make compare [default: json-diff-go]") fmt.Println(" -f, --display-only-fail shows only failed tests (not Skipped) [default: print all]") fmt.Println(" -E, --do-not-compare-error do not compare error") fmt.Println(" -v, --verbose 0: no message; 1: print result; 2: print request/response [default 0]") From 0a7ce7e856637696fc0833882c4a119aa00dc47d Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Wed, 18 Feb 2026 09:05:57 +0100 Subject: [PATCH 77/87] restore json diff kind argument --- cmd/integration/main.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 61d2232a..9c876ab9 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -87,8 +87,8 @@ func parseFlags(cfg *config.Config) error { excludeTestList := flag.String("X", "", "exclude test list") flag.StringVar(excludeTestList, "exclude-test-list", "", "exclude test list") - jsonDiff := flag.Bool("j", false, "use json-diff for compare") - flag.BoolVar(jsonDiff, "json-diff", false, "use 
json-diff for compare") + diffKind := flag.String("j", cfg.DiffKind.String(), "diff for JSON values") + flag.StringVar(diffKind, "json-diff", cfg.DiffKind.String(), "diff for JSON values") waitingTime := flag.Int("w", 0, "waiting time in milliseconds") flag.IntVar(waitingTime, "waiting-time", 0, "waiting time in milliseconds") @@ -138,9 +138,11 @@ func parseFlags(cfg *config.Config) error { cfg.MemProfile = *memProfile cfg.TraceFile = *traceFile - if *jsonDiff { - cfg.DiffKind = config.JsonDiffTool + kind, err := config.ParseDiffKind(*diffKind) + if err != nil { + return err } + cfg.DiffKind = kind if *daemonPort { cfg.DaemonUnderTest = config.DaemonOnOtherPort @@ -194,7 +196,7 @@ func usage() { fmt.Println("") fmt.Println("Options:") fmt.Println(" -h, --help print this help") - fmt.Println(" -j, --json-diff use json-diff tool to make compare [default: json-diff-go]") + fmt.Println(" -j, --json-diff use json-diff to make compare [default use json-diff]") fmt.Println(" -f, --display-only-fail shows only failed tests (not Skipped) [default: print all]") fmt.Println(" -E, --do-not-compare-error do not compare error") fmt.Println(" -v, --verbose 0: no message; 1: print result; 2: print request/response [default 0]") From e7175506e8ffa2a055a653ff9b91dd60068594e7 Mon Sep 17 00:00:00 2001 From: canepat <16927169+canepat@users.noreply.github.com> Date: Wed, 18 Feb 2026 09:09:55 +0100 Subject: [PATCH 78/87] fix usage --- cmd/integration/main.go | 52 ++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/cmd/integration/main.go b/cmd/integration/main.go index 9c876ab9..e090fc11 100644 --- a/cmd/integration/main.go +++ b/cmd/integration/main.go @@ -195,33 +195,33 @@ func usage() { fmt.Println("Launch an automated sequence of RPC integration tests on target blockchain node(s)") fmt.Println("") fmt.Println("Options:") - fmt.Println(" -h, --help print this help") - fmt.Println(" -j, --json-diff use json-diff to make compare 
[default use json-diff]") - fmt.Println(" -f, --display-only-fail shows only failed tests (not Skipped) [default: print all]") - fmt.Println(" -E, --do-not-compare-error do not compare error") - fmt.Println(" -v, --verbose 0: no message; 1: print result; 2: print request/response [default 0]") - fmt.Println(" -c, --continue runs all tests even if one test fails [default: exit at first failed test]") - fmt.Println(" -l, --loops [default loop 1]") - fmt.Println(" -b, --blockchain [default: mainnet]") - fmt.Println(" -s, --start-from-test run tests starting from specified test number [default starts from 1]") - fmt.Println(" -t, --run-test run single test using global test number") - fmt.Println(" -d, --compare-erigon-rpcdaemon send requests also to the reference daemon e.g.: Erigon RpcDaemon") - fmt.Println(" -T, --transport-type http,http_comp,https,websocket,websocket_comp [default http]") - fmt.Println(" -k, --jwt authentication token file") - fmt.Println(" -K, --create-jwt generate authentication token file and use it") - fmt.Println(" -a, --api-list-with run all tests of the specified API that contains string") - fmt.Println(" -A, --api-list run all tests of the specified API that match full name") - fmt.Println(" -x, --exclude-api-list exclude API list") - fmt.Println(" -X, --exclude-test-list exclude test list") - fmt.Println(" -o, --dump-response dump JSON RPC response even if responses are the same") - fmt.Println(" -H, --host host where the RpcDaemon is located [default: localhost]") - fmt.Println(" -p, --port port where the RpcDaemon is located [default: 8545]") - fmt.Println(" -I, --daemon-port Use 51515/51516 ports to server") + fmt.Println(" -h, --help print this help") + fmt.Println(" -j, --json-diff use json-diff to make compare [default: use json-diff-go]") + fmt.Println(" -f, --display-only-fail shows only failed tests (not Skipped) [default: print all]") + fmt.Println(" -E, --do-not-compare-error do not compare error") + fmt.Println(" -v, --verbose 
0: no message; 1: print result; 2: print request/response [default: 0]") + fmt.Println(" -c, --continue runs all tests even if one test fails [default: exit at first failed test]") + fmt.Println(" -l, --loops the number of integration tests loops [default: 1]") + fmt.Println(" -b, --blockchain the network to test [default: mainnet]") + fmt.Println(" -s, --start-from-test run tests starting from specified test number [default: 1]") + fmt.Println(" -t, --run-test run single test using global test number") + fmt.Println(" -d, --compare-erigon-rpcdaemon send requests also to the reference daemon e.g.: Erigon RpcDaemon") + fmt.Println(" -T, --transport-type http,http_comp,https,websocket,websocket_comp [default: http]") + fmt.Println(" -k, --jwt authentication token file") + fmt.Println(" -K, --create-jwt generate authentication token file and use it") + fmt.Println(" -a, --api-list-with run all tests of the specified API that contains string") + fmt.Println(" -A, --api-list run all tests of the specified API that match full name") + fmt.Println(" -x, --exclude-api-list exclude API list") + fmt.Println(" -X, --exclude-test-list exclude test list") + fmt.Println(" -o, --dump-response dump JSON RPC response even if responses are the same") + fmt.Println(" -H, --host host where the RpcDaemon is located [default: localhost]") + fmt.Println(" -p, --port port where the RpcDaemon is located [default: 8545]") + fmt.Println(" -I, --daemon-port use 51515/51516 ports to server") fmt.Println(" -e, --verify-external-provider send any request also to external API endpoint as reference") - fmt.Println(" -i, --without-compare-results send request and waits response without compare results") - fmt.Println(" -w, --waiting-time wait time after test execution in milliseconds") - fmt.Println(" -S, --serial all tests run in serial way [default: parallel]") - fmt.Println(" -L, --tests-on-latest-block runs only test on latest block") + fmt.Println(" -i, --without-compare-results send request 
and waits response without compare results") + fmt.Println(" -w, --waiting-time wait time after test execution in milliseconds") + fmt.Println(" -S, --serial all tests run in serial way [default: parallel]") + fmt.Println(" -L, --tests-on-latest-block runs only test on latest block") } func runMain() int { From db7eed7cbc2e871f81fa562bd7d0f48c5d857d4d Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Wed, 18 Feb 2026 20:49:16 +0100 Subject: [PATCH 79/87] fix -l option --- internal/runner/runner.go | 160 ++++++++++++++++++-------------------- 1 file changed, 74 insertions(+), 86 deletions(-) diff --git a/internal/runner/runner.go b/internal/runner/runner.go index 095799da..b34ea6fb 100644 --- a/internal/runner/runner.go +++ b/internal/runner/runner.go @@ -76,11 +76,6 @@ func Run(ctx context.Context, cancelCtx context.CancelFunc, cfg *config.Config) return -1, err } - // Worker pool setup - var wg sync.WaitGroup - testsChan := make(chan *testdata.TestDescriptor, 2000) - resultsChan := make(chan testdata.TestResult, 2000) - numWorkers := 1 if cfg.Parallel { numWorkers = runtime.NumCPU() @@ -92,95 +87,89 @@ func Run(ctx context.Context, cancelCtx context.CancelFunc, cfg *config.Config) clients[tt] = internalrpc.NewClient(tt, "", cfg.VerboseLevel) } - // Start workers - for range numWorkers { - wg.Add(1) + availableTestedAPIs := discovery.TotalAPIs + globalTestNumber := 0 + stats := &Stats{} + + // Each loop iteration runs as a complete batch: all tests are scheduled, + // workers drain the channel, results are collected, then the next iteration starts. 
+ for loopNum := range cfg.LoopNumber { + if ctx.Err() != nil { + break + } + + if cfg.LoopNumber != 1 { + fmt.Printf("\nTest iteration: %d\n", loopNum+1) + } + + testsChan := make(chan *testdata.TestDescriptor, 2000) + resultsChan := make(chan testdata.TestResult, 2000) + + var wg sync.WaitGroup + for range numWorkers { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case test := <-testsChan: + if test == nil { + return + } + testOutcome := RunTest(ctx, test, cfg, clients[test.TransportType]) + resultsChan <- testdata.TestResult{Outcome: testOutcome, Test: test} + case <-ctx.Done(): + return + } + } + }() + } + + var resultsWg sync.WaitGroup + resultsWg.Add(1) go func() { - defer wg.Done() + defer resultsWg.Done() + w := bufio.NewWriterSize(os.Stdout, 64*1024) + defer w.Flush() + pending := make(map[int]testdata.TestResult) + nextIndex := 0 for { select { - case test := <-testsChan: - if test == nil { + case result, ok := <-resultsChan: + if !ok { return } - testOutcome := RunTest(ctx, test, cfg, clients[test.TransportType]) - resultsChan <- testdata.TestResult{Outcome: testOutcome, Test: test} + pending[result.Test.Index] = result + // Flush all consecutive results starting from nextIndex + for { + r, exists := pending[nextIndex] + if !exists { + break + } + delete(pending, nextIndex) + nextIndex++ + printResult(w, &r, stats, cfg, cancelCtx) + if cfg.ExitOnFail && stats.FailedTests > 0 { + return + } + } case <-ctx.Done(): return } } }() - } - - // Results collector with buffered stdout — prints in scheduling order - var resultsWg sync.WaitGroup - resultsWg.Add(1) - stats := &Stats{} - go func() { - defer resultsWg.Done() - w := bufio.NewWriterSize(os.Stdout, 64*1024) - defer w.Flush() - pending := make(map[int]testdata.TestResult) - nextIndex := 0 - for { - select { - case result, ok := <-resultsChan: - if !ok { - return - } - pending[result.Test.Index] = result - // Flush all consecutive results starting from nextIndex - for { - r, exists := 
pending[nextIndex] - if !exists { - break - } - delete(pending, nextIndex) - nextIndex++ - printResult(w, &r, stats, cfg, cancelCtx) - if cfg.ExitOnFail && stats.FailedTests > 0 { - return - } - } - case <-ctx.Done(): - return - } - } - }() - - // Main scheduling loop - globalTestNumber := 0 - availableTestedAPIs := discovery.TotalAPIs - scheduledIndex := 0 - testRep := 0 - - for testRep = range cfg.LoopNumber { - select { - case <-ctx.Done(): - goto done - default: - } - - if cfg.LoopNumber != 1 { - fmt.Printf("\nTest iteration: %d\n", testRep+1) - } + // Schedule all tests for this iteration + scheduledIndex := 0 transportTypes := cfg.TransportTypes() + transportLoop: for _, transportType := range transportTypes { - select { - case <-ctx.Done(): - goto done - default: - } - testNumberInAnyLoop := 1 globalTestNumber = 0 for _, tc := range discovery.Tests { - select { - case <-ctx.Done(): - goto done - default: + if ctx.Err() != nil { + break transportLoop } globalTestNumber = tc.Number @@ -214,7 +203,7 @@ func Run(ctx context.Context, cancelCtx context.CancelFunc, cfg *config.Config) scheduledIndex++ select { case <-ctx.Done(): - goto done + break transportLoop case testsChan <- testDesc: } stats.ScheduledTests++ @@ -229,14 +218,13 @@ func Run(ctx context.Context, cancelCtx context.CancelFunc, cfg *config.Config) testNumberInAnyLoop++ } } - } -done: - // Close channels and wait - close(testsChan) - wg.Wait() - close(resultsChan) - resultsWg.Wait() + // Wait for this iteration to fully complete before starting the next + close(testsChan) + wg.Wait() + close(resultsChan) + resultsWg.Wait() + } if stats.ScheduledTests == 0 && cfg.TestingAPIsWith != "" { fmt.Printf("WARN: API filter %s selected no tests\n", cfg.TestingAPIsWith) @@ -264,7 +252,7 @@ done: // Print summary elapsed := time.Since(startTime) - stats.PrintSummary(elapsed, testRep, availableTestedAPIs, globalTestNumber) + stats.PrintSummary(elapsed, cfg.LoopNumber, availableTestedAPIs, globalTestNumber) if 
stats.FailedTests > 0 { return 1, nil From 3698787a569b55f01fda00116a0c498689bf719c Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Wed, 18 Feb 2026 20:49:47 +0100 Subject: [PATCH 80/87] add option --- .golangci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.golangci.yml b/.golangci.yml index 0640398c..cf410c40 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -84,6 +84,7 @@ issues: max-issues-per-linter: 0 output: + sort-results: true sort-order: - file From 5379beb90f3d1a399c0cc6ccf746e4b6655bc9c6 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Wed, 18 Feb 2026 21:07:00 +0100 Subject: [PATCH 81/87] fix after merge --- integration/run_tests.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/integration/run_tests.py b/integration/run_tests.py index 70ed7cb7..f7847227 100755 --- a/integration/run_tests.py +++ b/integration/run_tests.py @@ -259,9 +259,6 @@ def is_skipped(curr_api, test_name: str, global_test_number, config): for curr_test_name in api_not_compared: if curr_test_name in api_full_name: return 1 - for curr_test in tests_not_compared: - if curr_test in api_full_test_name: - return 1 if config.exclude_api_list != "": # scans exclude api list (-x) tokenize_exclude_api_list = config.exclude_api_list.split(",") for exclude_api in tokenize_exclude_api_list: @@ -322,6 +319,27 @@ def api_under_test(curr_api, test_name, config): return in_latest_list +def generate_json_report(filename, start_time, elapsed, total_tests, tested_apis, + loops, executed_tests, not_executed_tests, success_tests, + failed_tests, test_results): + """ Generate JSON report with test results """ + report = { + "summary": { + "start_time": start_time.isoformat(), + "time_elapsed": str(elapsed), + "available_tests": total_tests, + "available_tested_api": tested_apis, + "number_of_loops": loops + 1, + "executed_tests": executed_tests, + 
"not_executed_tests": not_executed_tests, + "success_tests": success_tests, + "failed_tests": failed_tests + }, + "test_results": test_results + } + with open(filename, 'w', encoding='utf8') as f: + json.dump(report, f, indent=2) + def print_latest_block(server1_url: str, server2_url: str): """ print ltest block number From 0db76b9d5e4971cba538bf1750e4e5115db4bfc2 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Thu, 19 Feb 2026 09:40:57 +0100 Subject: [PATCH 82/87] use default workers --- internal/perf/vegeta.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/internal/perf/vegeta.go b/internal/perf/vegeta.go index b5c0c7a3..1397de74 100644 --- a/internal/perf/vegeta.go +++ b/internal/perf/vegeta.go @@ -288,18 +288,22 @@ func (pt *PerfTest) runVegetaAttack(ctx context.Context, targets []vegeta.Target maxBodyInt, _ := strconv.Atoi(pt.Config.MaxBodyRsp) tr := &http.Transport{ - DisableCompression: pt.Config.DisableHttpCompression, - Proxy: http.ProxyFromEnvironment, + DisableCompression: pt.Config.DisableHttpCompression, + Proxy: http.ProxyFromEnvironment, + MaxIdleConnsPerHost: maxConnInt, } customClient := &http.Client{ Transport: tr, } + // + // High workers() counts can saturate server resources + // attacker := vegeta.NewAttacker( vegeta.Client(customClient), vegeta.Timeout(timeout), - vegeta.Workers(uint64(maxConnInt)), + vegeta.Workers(vegeta.DefaultWorkers), vegeta.MaxBody(int64(maxBodyInt)), vegeta.KeepAlive(true), ) From a79e9b5b6481d2a8db29f57d9165d70e053fe4e3 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Fri, 20 Feb 2026 08:44:00 +0100 Subject: [PATCH 83/87] configured also MaxConnsPerHost --- internal/perf/vegeta.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/perf/vegeta.go b/internal/perf/vegeta.go index 1397de74..1f09a875 100644 --- a/internal/perf/vegeta.go +++ b/internal/perf/vegeta.go @@ -291,6 +291,7 @@ func (pt 
*PerfTest) runVegetaAttack(ctx context.Context, targets []vegeta.Target DisableCompression: pt.Config.DisableHttpCompression, Proxy: http.ProxyFromEnvironment, MaxIdleConnsPerHost: maxConnInt, + MaxConnsPerHost: maxConnInt, } customClient := &http.Client{ From 3020c3d74811c8077b98cd98e43643065fa6e234 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Fri, 20 Feb 2026 11:05:02 +0100 Subject: [PATCH 84/87] configured MaxIdleConns & report file equivalent to paython --- internal/perf/report.go | 91 +++++++++++++++++++---------------------- internal/perf/vegeta.go | 1 + 2 files changed, 42 insertions(+), 50 deletions(-) diff --git a/internal/perf/report.go b/internal/perf/report.go index c20c9cfb..480e52a1 100644 --- a/internal/perf/report.go +++ b/internal/perf/report.go @@ -45,20 +45,21 @@ type JSONReport struct { // PlatformInfo holds platform hardware and software information. type PlatformInfo struct { - Vendor string `json:"vendor"` - Product string `json:"product"` - Board string `json:"board"` - CPU string `json:"cpu"` - Bogomips string `json:"bogomips"` - Kernel string `json:"kernel"` - GCCVersion string `json:"gccVersion"` - GoVersion string `json:"goVersion"` - ClientCommit string `json:"clientCommit"` + Vendor string `json:"vendor"` + Product string `json:"product"` + Board string `json:"board"` + CPU string `json:"cpu"` + Bogomips string `json:"bogomips"` + Kernel string `json:"kernel"` + GCCVersion string `json:"gccVersion"` + GoVersion string `json:"goVersion"` + SilkrpcCommit string `json:"silkrpcCommit"` + ErigonCommit string `json:"erigonCommit"` } // ConfigurationInfo holds test configuration information. 
type ConfigurationInfo struct { - TestingClient string `json:"testingClient"` + TestingDaemon string `json:"testingDaemon"` TestingAPI string `json:"testingApi"` TestSequence string `json:"testSequence"` TestRepetitions int `json:"testRepetitions"` @@ -69,8 +70,8 @@ type ConfigurationInfo struct { // JSONTestResult holds results for a single QPS/duration test. type JSONTestResult struct { - QPS int `json:"qps"` - Duration int `json:"duration"` + QPS string `json:"qps"` + Duration string `json:"duration"` TestRepetitions []RepetitionInfo `json:"testRepetitions"` } @@ -239,18 +240,19 @@ func (tr *TestReport) initializeJSONReport(cpuModel, bogomips, kernelVersion, ch tr.jsonReport = &JSONReport{ Platform: PlatformInfo{ - Vendor: strings.TrimSpace(tr.hardware.Vendor()), - Product: strings.TrimSpace(tr.hardware.Product()), - Board: strings.TrimSpace(tr.hardware.Board()), - CPU: strings.TrimSpace(cpuModel), - Bogomips: strings.TrimSpace(bogomips), - Kernel: strings.TrimSpace(kernelVersion), - GCCVersion: strings.TrimSpace(gccVersion), - GoVersion: strings.TrimSpace(goVersion), - ClientCommit: strings.TrimSpace(clientCommit), + Vendor: strings.TrimSpace(tr.hardware.Vendor()), + Product: strings.TrimSpace(tr.hardware.Product()), + Board: strings.TrimSpace(tr.hardware.Board()), + CPU: strings.TrimSpace(cpuModel), + Bogomips: strings.TrimSpace(bogomips), + Kernel: strings.TrimSpace(kernelVersion), + GCCVersion: strings.TrimSpace(gccVersion), + GoVersion: strings.TrimSpace(goVersion), + SilkrpcCommit: "", + ErigonCommit: strings.TrimSpace(clientCommit), }, Configuration: ConfigurationInfo{ - TestingClient: tr.Config.TestingClient, + TestingDaemon: tr.Config.TestingClient, TestingAPI: tr.Config.TestType, TestSequence: tr.Config.TestSequence, TestRepetitions: tr.Config.Repetitions, @@ -300,8 +302,8 @@ func (tr *TestReport) writeTestReportToJSON(metrics *PerfMetrics) error { if metrics.Repetition == 0 { tr.currentTestIdx++ tr.jsonReport.Results = append(tr.jsonReport.Results, 
JSONTestResult{ - QPS: metrics.QPS, - Duration: metrics.Duration, + QPS: strconv.Itoa(metrics.QPS), + Duration: strconv.Itoa(metrics.Duration), TestRepetitions: []RepetitionInfo{}, }) } @@ -332,7 +334,8 @@ func (tr *TestReport) writeTestReportToJSON(metrics *PerfMetrics) error { return nil } -// generateJSONReport generates a JSON report from a vegeta binary file. +// generateJSONReport generates a JSON report from a vegeta binary file, +// using Vegeta's native JSON marshaling (equivalent to `vegeta report --type=json`). func generateJSONReport(binaryFile string) (map[string]any, error) { file, err := os.Open(binaryFile) if err != nil { @@ -354,29 +357,21 @@ func generateJSONReport(binaryFile string) (map[string]any, error) { } metrics.Close() - report := map[string]any{ - "requests": metrics.Requests, - "duration": metrics.Duration.Seconds(), - "rate": metrics.Rate, - "throughput": metrics.Throughput, - "success": metrics.Success, - "latencies": map[string]any{ - "min": metrics.Latencies.Min.Seconds(), - "mean": metrics.Latencies.Mean.Seconds(), - "p50": metrics.Latencies.P50.Seconds(), - "p90": metrics.Latencies.P90.Seconds(), - "p95": metrics.Latencies.P95.Seconds(), - "p99": metrics.Latencies.P99.Seconds(), - "max": metrics.Latencies.Max.Seconds(), - }, - "status_codes": metrics.StatusCodes, - "errors": metrics.Errors, + data, err := json.Marshal(&metrics) + if err != nil { + return nil, err + } + + var report map[string]any + if err := json.Unmarshal(data, &report); err != nil { + return nil, err } return report, nil } -// generateHdrPlot generates HDR histogram plot data from a vegeta binary file. +// generateHdrPlot generates HDR histogram plot data from a vegeta binary file, +// equivalent to `vegeta report --type=hdrplot`. 
func generateHdrPlot(binaryFile string) (string, error) { file, err := os.Open(binaryFile) if err != nil { @@ -399,13 +394,9 @@ func generateHdrPlot(binaryFile string) (string, error) { metrics.Close() var buf bytes.Buffer - histogram := metrics.Histogram - if histogram != nil { - for i, bucket := range histogram.Buckets { - if _, err := fmt.Fprintf(&buf, "%.6f %d\n", float64(bucket), histogram.Counts[i]); err != nil { - return "", err - } - } + reporter := vegeta.NewHDRHistogramPlotReporter(&metrics) + if err := reporter(&buf); err != nil { + return "", err } return buf.String(), nil diff --git a/internal/perf/vegeta.go b/internal/perf/vegeta.go index 1f09a875..ffc3c4fb 100644 --- a/internal/perf/vegeta.go +++ b/internal/perf/vegeta.go @@ -290,6 +290,7 @@ func (pt *PerfTest) runVegetaAttack(ctx context.Context, targets []vegeta.Target tr := &http.Transport{ DisableCompression: pt.Config.DisableHttpCompression, Proxy: http.ProxyFromEnvironment, + MaxIdleConns: maxConnInt, MaxIdleConnsPerHost: maxConnInt, MaxConnsPerHost: maxConnInt, } From 6c98afda60cee507ffaf29bb12bf2159552c773c Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Fri, 20 Feb 2026 13:19:59 +0100 Subject: [PATCH 85/87] close connections --- internal/perf/vegeta.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/perf/vegeta.go b/internal/perf/vegeta.go index ffc3c4fb..754ce022 100644 --- a/internal/perf/vegeta.go +++ b/internal/perf/vegeta.go @@ -292,7 +292,6 @@ func (pt *PerfTest) runVegetaAttack(ctx context.Context, targets []vegeta.Target Proxy: http.ProxyFromEnvironment, MaxIdleConns: maxConnInt, MaxIdleConnsPerHost: maxConnInt, - MaxConnsPerHost: maxConnInt, } customClient := &http.Client{ @@ -325,6 +324,7 @@ func (pt *PerfTest) runVegetaAttack(ctx context.Context, targets []vegeta.Target case result := <-resultCh: if result == nil { metrics.Close() + tr.CloseIdleConnections() return &metrics, nil } metrics.Add(result) From 
e321f7c2f96018a2d4b0f3d5f814ffa0390a5650 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Fri, 20 Feb 2026 16:43:48 +0100 Subject: [PATCH 86/87] re conf ConPErHost --- internal/perf/vegeta.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/perf/vegeta.go b/internal/perf/vegeta.go index 754ce022..b5d475ac 100644 --- a/internal/perf/vegeta.go +++ b/internal/perf/vegeta.go @@ -292,6 +292,7 @@ func (pt *PerfTest) runVegetaAttack(ctx context.Context, targets []vegeta.Target Proxy: http.ProxyFromEnvironment, MaxIdleConns: maxConnInt, MaxIdleConnsPerHost: maxConnInt, + MaxConnsPerHost: maxConnInt, } customClient := &http.Client{ From c97a13312697cfcd352c4673dd2337d1a51eac2e Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com.> Date: Fri, 20 Feb 2026 19:59:23 +0100 Subject: [PATCH 87/87] confif maxBody --- internal/perf/vegeta.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/internal/perf/vegeta.go b/internal/perf/vegeta.go index b5d475ac..bb6fbefe 100644 --- a/internal/perf/vegeta.go +++ b/internal/perf/vegeta.go @@ -285,7 +285,6 @@ func (pt *PerfTest) runVegetaAttack(ctx context.Context, targets []vegeta.Target timeout, _ := time.ParseDuration(pt.Config.VegetaResponseTimeout) maxConnInt, _ := strconv.Atoi(pt.Config.MaxConnection) - maxBodyInt, _ := strconv.Atoi(pt.Config.MaxBodyRsp) tr := &http.Transport{ DisableCompression: pt.Config.DisableHttpCompression, @@ -299,14 +298,21 @@ func (pt *PerfTest) runVegetaAttack(ctx context.Context, targets []vegeta.Target Transport: tr, } + // Vegeta v12 reads MaxBody bytes then drains the remainder with + // io.Copy(io.Discard, r.Body). If the drain fails (e.g. server RST on a + // keepalive connection), res.Code stays 0 and the request is counted as + // failed even though a 200 OK was received. 
+ // With MaxBody(-1) Vegeta reads the full body; the drain is then a no-op + // (0 bytes remaining), res.Code is always set correctly, and success + // counting matches Python/vegeta-CLI behaviour. // - // High workers() counts can saturate server resources + // High workers() counts can saturate server resources. // attacker := vegeta.NewAttacker( vegeta.Client(customClient), vegeta.Timeout(timeout), - vegeta.Workers(vegeta.DefaultWorkers), - vegeta.MaxBody(int64(maxBodyInt)), + vegeta.Workers(vegeta.DefaultWorkers), + vegeta.MaxBody(-1), vegeta.KeepAlive(true), )