-
Notifications
You must be signed in to change notification settings - Fork 450
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
pull in geth changes for state recreation #2005
Merged
Merged
Changes from 20 commits
Commits
Show all changes
49 commits
Select commit
Hold shift + click to select a range
8eebd46
update geth and adjust tests
magicxyyz 6fb2ba5
update geth
magicxyyz f865d1e
Merge branch 'master' into state-release
magicxyyz a40f8f1
Merge branch 'master' into state-release
magicxyyz d21fdd8
update geth
magicxyyz a1ac5d0
Merge branch 'master' into state-release
magicxyyz 4ff776d
Revert "update geth and adjust tests"
magicxyyz 32df394
update expected error in TestRecreateStateForRPCBlockNotFoundWhileRec…
magicxyyz a9a0772
update geth
magicxyyz 1234559
Merge branch 'master' into state-release
amsanghi ea05f0a
add test for getting state for rpc
magicxyyz dc936ea
Merge branch 'state-release-test' into state-release
magicxyyz 7659d68
add missing error check in recreate state test
magicxyyz c1bdefd
update geth
magicxyyz 923bc4c
add test for getting state for rpc on hybrid archive node
magicxyyz ac9d63b
add missing error check in recreate state test
magicxyyz 42b6c83
add missing error check
magicxyyz bb5c908
Merge branch 'state-release-test' into state-release
magicxyyz b776a68
Merge branch 'master' into state-release
amsanghi 8e2ede8
update geth
magicxyyz d1f244f
update geth
magicxyyz 25bd9eb
Merge branch 'master' into state-release
magicxyyz f69b53e
update go.mod
magicxyyz fdc42ba
update recreate state tests to account for states saved on shutdown
magicxyyz e12dd8a
update geth
magicxyyz 811d4fa
Merge branch 'master' into state-release
magicxyyz 2e3d313
update geth
magicxyyz e415501
Merge branch 'master' into state-release
magicxyyz a781faf
fix recreation after restart test
magicxyyz 72d678d
update geth
magicxyyz 9bff2b2
remove committed by mistake files
magicxyyz 60e4ff4
update geth
magicxyyz c341182
system_tests: fix initialization of default value of MaxRecreateState…
magicxyyz 5559118
update geth
magicxyyz ab310ac
Merge branch 'master' into state-release
magicxyyz 251abe0
update blocks reexecutor
magicxyyz 41ebf43
update geth
magicxyyz 22fa881
uncomment testing defaults in recreate state tests
magicxyyz 49ec18c
Merge branch 'master' into state-release
magicxyyz 6890439
update geth
magicxyyz 48d0b9d
update geth
magicxyyz 7296df7
Merge branch 'master' into state-release
magicxyyz 2a53d99
Merge branch 'master' into state-release
magicxyyz 2d449ae
add StateAndHeader test
magicxyyz 8f4bf75
Merge branch 'master' into state-release
magicxyyz 8c24484
update StateAndHeader test
magicxyyz b64198a
update error check in StateAndHeader test
magicxyyz 7718918
update geth
magicxyyz 25624db
update geth
magicxyyz File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
Submodule go-ethereum
updated
6 files
+43 −8 | arbitrum/apibackend.go | |
+6 −1 | arbitrum/recordingdb.go | |
+20 −30 | arbitrum/recreatestate.go | |
+1 −8 | core/state/pruner/pruner.go | |
+1 −1 | eth/api_backend.go | |
+11 −4 | eth/state_accessor.go |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -3,6 +3,7 @@ package arbtest | |
import ( | ||
"context" | ||
"errors" | ||
"fmt" | ||
"math/big" | ||
"strings" | ||
"testing" | ||
|
@@ -12,21 +13,16 @@ import ( | |
"github.com/ethereum/go-ethereum/core" | ||
"github.com/ethereum/go-ethereum/core/rawdb" | ||
"github.com/ethereum/go-ethereum/core/types" | ||
"github.com/ethereum/go-ethereum/ethclient" | ||
"github.com/ethereum/go-ethereum/ethdb" | ||
"github.com/ethereum/go-ethereum/params" | ||
"github.com/ethereum/go-ethereum/rpc" | ||
"github.com/ethereum/go-ethereum/trie" | ||
"github.com/offchainlabs/nitro/arbnode" | ||
"github.com/offchainlabs/nitro/execution/gethexec" | ||
"github.com/offchainlabs/nitro/util" | ||
) | ||
|
||
func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethexec.Config, txCount uint64) (node *arbnode.Node, executionNode *gethexec.ExecutionNode, l2client *ethclient.Client, cancel func()) { | ||
t.Helper() | ||
builder := NewNodeBuilder(ctx).DefaultConfig(t, true) | ||
builder.execConfig = execConfig | ||
cleanup := builder.Build(t) | ||
builder.L2Info.GenerateAccount("User2") | ||
func makeSomeTransfers(t *testing.T, ctx context.Context, builder *NodeBuilder, txCount uint64) { | ||
var txs []*types.Transaction | ||
for i := uint64(0); i < txCount; i++ { | ||
tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) | ||
|
@@ -38,8 +34,16 @@ func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethe | |
_, err := builder.L2.EnsureTxSucceeded(tx) | ||
Require(t, err) | ||
} | ||
} | ||
|
||
return builder.L2.ConsensusNode, builder.L2.ExecNode, builder.L2.Client, cleanup | ||
func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethexec.Config, txCount uint64) (*NodeBuilder, func()) { | ||
t.Helper() | ||
builder := NewNodeBuilder(ctx).DefaultConfig(t, true) | ||
builder.execConfig = execConfig | ||
cleanup := builder.Build(t) | ||
builder.L2Info.GenerateAccount("User2") | ||
makeSomeTransfers(t, ctx, builder, txCount) | ||
return builder, cleanup | ||
} | ||
|
||
func fillHeaderCache(t *testing.T, bc *core.BlockChain, from, to uint64) { | ||
|
@@ -89,17 +93,18 @@ func removeStatesFromDb(t *testing.T, bc *core.BlockChain, db ethdb.Database, fr | |
func TestRecreateStateForRPCNoDepthLimit(t *testing.T) { | ||
ctx, cancel := context.WithCancel(context.Background()) | ||
defer cancel() | ||
nodeConfig := gethexec.ConfigDefaultTest() | ||
nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth | ||
nodeConfig.Sequencer.MaxBlockSpeed = 0 | ||
nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
nodeConfig.Caching.Archive = true | ||
execConfig := gethexec.ConfigDefaultTest() | ||
execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth | ||
execConfig.Sequencer.MaxBlockSpeed = 0 | ||
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
execConfig.Caching.Archive = true | ||
// disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there | ||
nodeConfig.Caching.TrieCleanCache = 0 | ||
nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 | ||
nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 | ||
_, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) | ||
execConfig.Caching.TrieCleanCache = 0 | ||
execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 | ||
execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 | ||
builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) | ||
defer cancelNode() | ||
execNode, l2client := builder.L2.ExecNode, builder.L2.Client | ||
bc := execNode.Backend.ArbInterface().BlockChain() | ||
db := execNode.Backend.ChainDb() | ||
|
||
|
@@ -123,17 +128,18 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { | |
ctx, cancel := context.WithCancel(context.Background()) | ||
defer cancel() | ||
depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei)) | ||
nodeConfig := gethexec.ConfigDefaultTest() | ||
nodeConfig.RPC.MaxRecreateStateDepth = depthGasLimit | ||
nodeConfig.Sequencer.MaxBlockSpeed = 0 | ||
nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
nodeConfig.Caching.Archive = true | ||
execConfig := gethexec.ConfigDefaultTest() | ||
execConfig.RPC.MaxRecreateStateDepth = depthGasLimit | ||
execConfig.Sequencer.MaxBlockSpeed = 0 | ||
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
execConfig.Caching.Archive = true | ||
// disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there | ||
nodeConfig.Caching.TrieCleanCache = 0 | ||
nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 | ||
nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 | ||
_, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) | ||
execConfig.Caching.TrieCleanCache = 0 | ||
execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 | ||
execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 | ||
builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) | ||
defer cancelNode() | ||
execNode, l2client := builder.L2.ExecNode, builder.L2.Client | ||
bc := execNode.Backend.ArbInterface().BlockChain() | ||
db := execNode.Backend.ChainDb() | ||
|
||
|
@@ -157,17 +163,18 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { | |
func TestRecreateStateForRPCDepthLimitExceeded(t *testing.T) { | ||
ctx, cancel := context.WithCancel(context.Background()) | ||
defer cancel() | ||
nodeConfig := gethexec.ConfigDefaultTest() | ||
nodeConfig.RPC.MaxRecreateStateDepth = int64(200) | ||
nodeConfig.Sequencer.MaxBlockSpeed = 0 | ||
nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
nodeConfig.Caching.Archive = true | ||
execConfig := gethexec.ConfigDefaultTest() | ||
execConfig.RPC.MaxRecreateStateDepth = int64(200) | ||
execConfig.Sequencer.MaxBlockSpeed = 0 | ||
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
execConfig.Caching.Archive = true | ||
// disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there | ||
nodeConfig.Caching.TrieCleanCache = 0 | ||
nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 | ||
nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 | ||
_, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) | ||
execConfig.Caching.TrieCleanCache = 0 | ||
execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 | ||
execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 | ||
builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) | ||
defer cancelNode() | ||
execNode, l2client := builder.L2.ExecNode, builder.L2.Client | ||
bc := execNode.Backend.ArbInterface().BlockChain() | ||
db := execNode.Backend.ChainDb() | ||
|
||
|
@@ -191,17 +198,18 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { | |
var headerCacheLimit uint64 = 512 | ||
ctx, cancel := context.WithCancel(context.Background()) | ||
defer cancel() | ||
nodeConfig := gethexec.ConfigDefaultTest() | ||
nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth | ||
nodeConfig.Sequencer.MaxBlockSpeed = 0 | ||
nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
nodeConfig.Caching.Archive = true | ||
execConfig := gethexec.ConfigDefaultTest() | ||
execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth | ||
execConfig.Sequencer.MaxBlockSpeed = 0 | ||
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
execConfig.Caching.Archive = true | ||
// disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there | ||
nodeConfig.Caching.TrieCleanCache = 0 | ||
nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 | ||
nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 | ||
_, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, headerCacheLimit+5) | ||
execConfig.Caching.TrieCleanCache = 0 | ||
execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 | ||
execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 | ||
builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, headerCacheLimit+5) | ||
defer cancelNode() | ||
execNode, l2client := builder.L2.ExecNode, builder.L2.Client | ||
bc := execNode.Backend.ArbInterface().BlockChain() | ||
db := execNode.Backend.ChainDb() | ||
|
||
|
@@ -236,16 +244,17 @@ func TestRecreateStateForRPCBeyondGenesis(t *testing.T) { | |
ctx, cancel := context.WithCancel(context.Background()) | ||
defer cancel() | ||
|
||
nodeConfig := gethexec.ConfigDefaultTest() | ||
nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth | ||
nodeConfig.Sequencer.MaxBlockSpeed = 0 | ||
nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
nodeConfig.Caching.Archive = true | ||
execConfig := gethexec.ConfigDefaultTest() | ||
execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth | ||
execConfig.Sequencer.MaxBlockSpeed = 0 | ||
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
execConfig.Caching.Archive = true | ||
// disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there | ||
nodeConfig.Caching.TrieCleanCache = 0 | ||
nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 | ||
nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 | ||
_, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) | ||
execConfig.Caching.TrieCleanCache = 0 | ||
execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 | ||
execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 | ||
builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) | ||
execNode, l2client := builder.L2.ExecNode, builder.L2.Client | ||
defer cancelNode() | ||
bc := execNode.Backend.ArbInterface().BlockChain() | ||
db := execNode.Backend.ChainDb() | ||
|
@@ -271,17 +280,18 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { | |
var blockCacheLimit uint64 = 256 | ||
ctx, cancel := context.WithCancel(context.Background()) | ||
defer cancel() | ||
nodeConfig := gethexec.ConfigDefaultTest() | ||
nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth | ||
nodeConfig.Sequencer.MaxBlockSpeed = 0 | ||
nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
nodeConfig.Caching.Archive = true | ||
execConfig := gethexec.ConfigDefaultTest() | ||
execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth | ||
execConfig.Sequencer.MaxBlockSpeed = 0 | ||
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
execConfig.Caching.Archive = true | ||
// disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there | ||
nodeConfig.Caching.TrieCleanCache = 0 | ||
execConfig.Caching.TrieCleanCache = 0 | ||
|
||
nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 | ||
nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 | ||
_, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, blockCacheLimit+4) | ||
execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 | ||
execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 | ||
builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, blockCacheLimit+4) | ||
execNode, l2client := builder.L2.ExecNode, builder.L2.Client | ||
defer cancelNode() | ||
bc := execNode.Backend.ArbInterface().BlockChain() | ||
db := execNode.Backend.ChainDb() | ||
|
@@ -306,7 +316,7 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { | |
hash := rawdb.ReadCanonicalHash(db, lastBlock) | ||
Fatal(t, "Didn't fail to get balance at block:", lastBlock, " with hash:", hash, ", lastBlock:", lastBlock) | ||
} | ||
if !strings.Contains(err.Error(), "block not found while recreating") { | ||
if !strings.Contains(err.Error(), fmt.Sprintf("block #%d not found", blockBodyToRemove)) { | ||
Fatal(t, "Failed with unexpected error: \"", err, "\", at block:", lastBlock, "lastBlock:", lastBlock) | ||
} | ||
} | ||
|
@@ -458,3 +468,90 @@ func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { | |
} | ||
} | ||
} | ||
|
||
func TestGettingStateForRPCFullNode(t *testing.T) { | ||
ctx, cancel := context.WithCancel(context.Background()) | ||
defer cancel() | ||
execConfig := gethexec.ConfigDefaultTest() | ||
execConfig.Caching.SnapshotCache = 0 // disable snapshots | ||
execConfig.Caching.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are | ||
execConfig.Sequencer.MaxBlockSpeed = 0 | ||
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 16) | ||
execNode, _ := builder.L2.ExecNode, builder.L2.Client | ||
defer cancelNode() | ||
bc := execNode.Backend.ArbInterface().BlockChain() | ||
api := execNode.Backend.APIBackend() | ||
|
||
header := bc.CurrentBlock() | ||
if header == nil { | ||
Fatal(t, "failed to get current block header") | ||
} | ||
state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) | ||
Require(t, err) | ||
addr := builder.L2Info.GetAddress("User2") | ||
exists := state.Exist(addr) | ||
err = state.Error() | ||
Require(t, err) | ||
if !exists { | ||
Fatal(t, "User2 address does not exist in the state") | ||
} | ||
// Get the state again to avoid caching | ||
state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) | ||
Require(t, err) | ||
|
||
blockCountRequiredToFlushDirties := builder.execConfig.Caching.BlockCount | ||
makeSomeTransfers(t, ctx, builder, blockCountRequiredToFlushDirties) | ||
|
||
exists = state.Exist(addr) | ||
err = state.Error() | ||
Require(t, err) | ||
if !exists { | ||
Fatal(t, "User2 address does not exist in the state") | ||
} | ||
} | ||
|
||
func TestGettingStateForRPCHybridArchiveNode(t *testing.T) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is the logic here different from above other than config? can we merge the functions? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. addressed in #2442 |
||
ctx, cancel := context.WithCancel(context.Background()) | ||
defer cancel() | ||
execConfig := gethexec.ConfigDefaultTest() | ||
execConfig.Caching.Archive = true | ||
execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 128 | ||
execConfig.Caching.BlockCount = 128 | ||
execConfig.Caching.SnapshotCache = 0 // disable snapshots | ||
execConfig.Caching.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are | ||
execConfig.Sequencer.MaxBlockSpeed = 0 | ||
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 | ||
builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 16) | ||
execNode, _ := builder.L2.ExecNode, builder.L2.Client | ||
defer cancelNode() | ||
bc := execNode.Backend.ArbInterface().BlockChain() | ||
api := execNode.Backend.APIBackend() | ||
|
||
header := bc.CurrentBlock() | ||
if header == nil { | ||
Fatal(t, "failed to get current block header") | ||
} | ||
state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) | ||
Require(t, err) | ||
addr := builder.L2Info.GetAddress("User2") | ||
exists := state.Exist(addr) | ||
err = state.Error() | ||
Require(t, err) | ||
if !exists { | ||
Fatal(t, "User2 address does not exist in the state") | ||
} | ||
// Get the state again to avoid caching | ||
state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) | ||
Require(t, err) | ||
|
||
blockCountRequiredToFlushDirties := builder.execConfig.Caching.BlockCount | ||
makeSomeTransfers(t, ctx, builder, blockCountRequiredToFlushDirties) | ||
|
||
exists = state.Exist(addr) | ||
err = state.Error() | ||
Require(t, err) | ||
if !exists { | ||
Fatal(t, "User2 address does not exist in the state") | ||
} | ||
} |
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
can we add here a check that StateAndHeaderByNumber will now fail?
Or maybe force state to drop after the end of current test - and check that StateAndHeaderByNumber fails then?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
addressed in #2442