package block

import (
	"encoding/json"
	"fmt"
	"math/big"
	"net/http"
	"net/http/httptest"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"

	"razor/rpc"
)
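
// makeBlockHandler returns an http.HandlerFunc that emulates a minimal JSON-RPC
// endpoint for the two methods these tests rely on: eth_blockNumber and
// eth_getBlockByNumber. As a sketch, the standard JSON-RPC 2.0 exchange for
// blockNumber = 100 (0x64) looks roughly like the following; the exact fields
// sent by the client are assumed here, not asserted by these tests:
//
//	-> {"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}
//	<- {"jsonrpc":"2.0","id":1,"result":"0x64"}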
func makeBlockHandler(blockNumber uint64, blockTime uint64) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var req map[string]interface{}
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			http.Error(w, "bad request", http.StatusBadRequest)
			return
		}

		method, ok := req["method"].(string)
		if !ok {
			http.Error(w, "missing method", http.StatusBadRequest)
			return
		}

		var result interface{}

		switch method {
		case "eth_blockNumber":
			result = fmt.Sprintf("0x%x", blockNumber)

		case "eth_getBlockByNumber":
			// Use go-ethereum's types.Header struct to ensure all required fields are included.
			header := &types.Header{
				Number:      big.NewInt(int64(blockNumber)),
				Time:        blockTime,
				ParentHash:  common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
				UncleHash:   types.EmptyUncleHash,
				Coinbase:    common.HexToAddress("0x0000000000000000000000000000000000000000"),
				Root:        common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222"),
				TxHash:      common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333"),
				ReceiptHash: common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444"),
				Bloom:       types.Bloom{},
				Difficulty:  big.NewInt(2),
				GasLimit:    30000000,
				GasUsed:     0,
				Extra:       []byte{},
			}

			// The header is JSON-encoded below as part of the JSON-RPC response;
			// types.Header marshals itself into the hex-encoded RPC representation.
			result = header
		default:
			http.Error(w, "unsupported method", http.StatusBadRequest)
			return
		}

		resp := map[string]interface{}{
			"jsonrpc": "2.0",
			"id":      req["id"],
			"result":  result,
		}
		w.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(w).Encode(resp)
	}
}

// createManagerFromServers creates an RPCManager from a list of test servers.
func createManagerFromServers(servers ...*httptest.Server) *rpc.RPCManager {
	endpoints := make([]*rpc.RPCEndpoint, len(servers))
	for i, s := range servers {
		endpoints[i] = &rpc.RPCEndpoint{URL: s.URL}
	}
	return &rpc.RPCManager{Endpoints: endpoints}
}

// TestBlockMonitorUpdateBlock tests updateLatestBlock behavior with different block numbers.
func TestBlockMonitorUpdateBlock(t *testing.T) {
	// Simulate two endpoints: one returning an outdated block, the other an up-to-date block.
	blockNumber := uint64(100)
	outdatedBlockNumber := uint64(90)

	ts1 := httptest.NewServer(makeBlockHandler(blockNumber, uint64(time.Now().Unix())))
	t.Cleanup(func() { ts1.Close() })

	ts2 := httptest.NewServer(makeBlockHandler(outdatedBlockNumber, uint64(time.Now().Unix())))
	t.Cleanup(func() { ts2.Close() })

	// Create an RPC manager with both endpoints.
	manager := createManagerFromServers(ts1, ts2)

	// Refresh endpoints so that the manager pings each endpoint.
	if err := manager.RefreshEndpoints(); err != nil {
		t.Fatalf("RefreshEndpoints failed: %v", err)
	}

	// Initialize BlockMonitor.
	client, err := manager.GetBestRPCClient()
	if err != nil {
		t.Fatalf("GetBestRPCClient failed: %v", err)
	}
	bm := NewBlockMonitor(client, manager, 1, 10)

	// Simulate fetching the latest block.
	bm.updateLatestBlock()

	if bm.latestBlock == nil {
		t.Fatal("Expected latest block to be set, but got nil")
	}

	// Ensure the latest block number comes from the up-to-date endpoint.
	if bm.latestBlock.Number.Uint64() != blockNumber {
		t.Errorf("Expected block number %d, got %d", blockNumber, bm.latestBlock.Number.Uint64())
	}

	// Simulate an outdated block number being reported.
	bm.latestBlock.Number = big.NewInt(int64(outdatedBlockNumber))
	bm.updateLatestBlock()

	// After re-fetching, the best endpoint should still be the up-to-date one.
	newBestURL, _ := manager.GetBestEndpointURL()
	if newBestURL != ts1.URL {
		t.Errorf("Expected best endpoint to be %s after outdated block, got %s", ts1.URL, newBestURL)
	}
}

// TestBlockMonitorStaleBlock checks that stale block detection works correctly.
func TestBlockMonitorStaleBlock(t *testing.T) {
	currentTime := uint64(time.Now().Unix())
	staleTime := uint64(time.Now().Add(-15 * time.Second).Unix())

	ts1 := httptest.NewServer(makeBlockHandler(120, currentTime))
	t.Cleanup(func() { ts1.Close() })

	ts2 := httptest.NewServer(makeBlockHandler(110, staleTime))
	t.Cleanup(func() { ts2.Close() })

	// Create an RPC manager with both endpoints.
	manager := createManagerFromServers(ts1, ts2)

	// Refresh endpoints so that the manager pings each endpoint.
	if err := manager.RefreshEndpoints(); err != nil {
		t.Fatalf("RefreshEndpoints failed: %v", err)
	}

	// Initialize BlockMonitor.
	client, err := manager.GetBestRPCClient()
	if err != nil {
		t.Fatalf("GetBestRPCClient failed: %v", err)
	}
	bm := NewBlockMonitor(client, manager, 1, 10)

	// Fetch the latest block and run stale-block detection.
	bm.updateLatestBlock()
	bm.checkForStaleBlock()

	if bm.latestBlock == nil {
		t.Fatal("Expected latest block to be set, but got nil")
	}
	if bm.latestBlock.Number.Uint64() != 120 {
		t.Errorf("Expected block number 120 after stale block detection, got %d", bm.latestBlock.Number.Uint64())
	}
}

// TestBlockMonitorSwitchOnStale tests switching to a better endpoint when a stale block is detected.
func TestBlockMonitorSwitchOnStale(t *testing.T) {
	latestBlock := uint64(150)
	staleBlock := uint64(140)

	var blockNumber atomic.Uint64
	blockNumber.Store(staleBlock)

	// Simulate a server that starts with a stale block and is later updated to a fresh one.
	ts1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		makeBlockHandler(blockNumber.Load(), uint64(time.Now().Unix()))(w, r)
	}))
	t.Cleanup(func() { ts1.Close() })

	ts2 := httptest.NewServer(makeBlockHandler(latestBlock, uint64(time.Now().Unix())))
	t.Cleanup(func() { ts2.Close() })

	// Create an RPC manager with both endpoints.
	manager := createManagerFromServers(ts1, ts2)

	// Refresh endpoints so that the manager pings each endpoint.
	if err := manager.RefreshEndpoints(); err != nil {
		t.Fatalf("RefreshEndpoints failed: %v", err)
	}

	// Initialize BlockMonitor.
	client, err := manager.GetBestRPCClient()
	if err != nil {
		t.Fatalf("GetBestRPCClient failed: %v", err)
	}
	bm := NewBlockMonitor(client, manager, 1, 10)

	// Start with the stale block.
	blockNumber.Store(staleBlock)
	bm.updateLatestBlock()
	bm.checkForStaleBlock()

	// Ensure the switch occurred to the better endpoint.
	bestURL, _ := manager.GetBestEndpointURL()
	if bestURL != ts2.URL {
		t.Errorf("Expected best endpoint to switch to %s, got %s", ts2.URL, bestURL)
	}

	// Now update the first endpoint to a fresh block.
	blockNumber.Store(latestBlock)
	bm.updateLatestBlock()
	bm.checkForStaleBlock()

	// The monitor should detect the updated block.
	if bm.latestBlock.Number.Uint64() != latestBlock {
		t.Errorf("Expected latest block number %d, got %d", latestBlock, bm.latestBlock.Number.Uint64())
	}
}

// TestBlockMonitorSwitchFailure tests the case where no alternate endpoints are available.
func TestBlockMonitorSwitchFailure(t *testing.T) {
	staleBlock := uint64(80)

	ts1 := httptest.NewServer(makeBlockHandler(staleBlock, uint64(time.Now().Add(-20*time.Second).Unix())))
	t.Cleanup(func() { ts1.Close() })

	// Create an RPC manager with a single stale endpoint.
	manager := createManagerFromServers(ts1)

	// Refresh endpoints so that the manager pings each endpoint.
	if err := manager.RefreshEndpoints(); err != nil {
		t.Fatalf("RefreshEndpoints failed: %v", err)
	}

	// Initialize BlockMonitor.
	client, err := manager.GetBestRPCClient()
	if err != nil {
		t.Fatalf("GetBestRPCClient failed: %v", err)
	}
	bm := NewBlockMonitor(client, manager, 1, 10)

	// Start with a stale block.
	bm.updateLatestBlock()
	bm.checkForStaleBlock()

	// Since no alternate endpoints are available, the best endpoint should remain unchanged.
	bestURL, _ := manager.GetBestEndpointURL()
	if bestURL != ts1.URL {
		t.Errorf("Expected best endpoint to remain %s, got %s", ts1.URL, bestURL)
	}
}