diff --git a/cmd/geth/main.go b/cmd/geth/main.go index fe34b746f6..4ceb9c9581 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -94,6 +94,8 @@ var ( utils.TxLookupLimitFlag, utils.TransactionHistoryFlag, utils.StateHistoryFlag, + utils.ProposeBlockIntervalFlag, + utils.PathDBNodeBufferTypeFlag, utils.LightServeFlag, utils.LightIngressFlag, utils.LightEgressFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 47c6527644..fc9ae0f96a 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -301,10 +301,16 @@ var ( Usage: "Scheme to use for storing ethereum state ('hash' or 'path')", Category: flags.StateCategory, } - PathDBSyncFlag = &cli.BoolFlag{ - Name: "pathdb.sync", - Usage: "sync flush nodes cache to disk in path schema", - Value: false, + PathDBNodeBufferTypeFlag = &cli.StringFlag{ + Name: "pathdb.nodebuffer", + Usage: "Type of trienodebuffer to cache trie nodes in disklayer('list', 'sync', or 'async')", + Value: "async", + Category: flags.StateCategory, + } + ProposeBlockIntervalFlag = &cli.Uint64Flag{ + Name: "pathdb.proposeblock", + Usage: "keep the same with op-proposer propose block interval", + Value: pathdb.DefaultProposeBlockInterval, Category: flags.StateCategory, } StateHistoryFlag = &cli.Uint64Flag{ @@ -1869,8 +1875,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { log.Warn("The flag --txlookuplimit is deprecated and will be removed, please use --history.transactions") cfg.TransactionHistory = ctx.Uint64(TxLookupLimitFlag.Name) } - if ctx.IsSet(PathDBSyncFlag.Name) { - cfg.PathSyncFlush = true + if ctx.IsSet(PathDBNodeBufferTypeFlag.Name) { + cfg.PathNodeBuffer = pathdb.GetNodeBufferType(ctx.String(PathDBNodeBufferTypeFlag.Name)) + } + if ctx.IsSet(ProposeBlockIntervalFlag.Name) { + cfg.ProposeBlockInterval = ctx.Uint64(ProposeBlockIntervalFlag.Name) } if ctx.String(GCModeFlag.Name) == "archive" && cfg.TransactionHistory != 0 { cfg.TransactionHistory = 0 diff --git a/core/blockchain.go b/core/blockchain.go index b42db6368b..1dd7490ee3 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -145,19 +145,19 @@ const ( // CacheConfig contains the configuration values for the trie database // and state snapshot these are resident in a blockchain. type CacheConfig struct { - TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory - TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks - TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk - TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node) - TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk - SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory - Preimages bool // Whether to store preimage of trie key to the disk - StateHistory uint64 // Number of blocks from head whose state histories are reserved. - StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top - PathSyncFlush bool // Whether sync flush the trienodebuffer of pathdb to disk. - - SnapshotNoBuild bool // Whether the background generation is allowed - SnapshotWait bool // Wait for snapshot construction on startup. 
TODO(karalabe): This is a dirty hack for testing, nuke it + TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory + TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks + TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk + TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node) + TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk + SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory + Preimages bool // Whether to store preimage of trie key to the disk + StateHistory uint64 // Number of blocks from head whose state histories are reserved. + StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top + PathNodeBuffer pathdb.NodeBufferType // Type of trienodebuffer to cache trie nodes in disklayer + ProposeBlockInterval uint64 // Propose block to L1 block interval. + SnapshotNoBuild bool // Whether the background generation is allowed + SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it TrieCommitInterval uint64 // Define a block height interval, commit trie every TrieCommitInterval block height. } @@ -172,10 +172,11 @@ func (c *CacheConfig) triedbConfig() *trie.Config { } if c.StateScheme == rawdb.PathScheme { config.PathDB = &pathdb.Config{ - SyncFlush: c.PathSyncFlush, - StateHistory: c.StateHistory, - CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, - DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024, + TrieNodeBufferType: c.PathNodeBuffer, + StateHistory: c.StateHistory, + CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, + DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024, + ProposeBlockInterval: c.ProposeBlockInterval, } } return config diff --git a/eth/backend.go b/eth/backend.go index b989803f17..765b96caa5 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -197,17 +197,18 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { EnableOpcodeOptimizations: config.EnableOpcodeOptimizing, } cacheConfig = &core.CacheConfig{ - TrieCleanLimit: config.TrieCleanCache, - TrieCleanNoPrefetch: config.NoPrefetch, - TrieDirtyLimit: config.TrieDirtyCache, - TrieDirtyDisabled: config.NoPruning, - TrieTimeLimit: config.TrieTimeout, - SnapshotLimit: config.SnapshotCache, - Preimages: config.Preimages, - StateHistory: config.StateHistory, - StateScheme: scheme, - TrieCommitInterval: config.TrieCommitInterval, - PathSyncFlush: config.PathSyncFlush, + TrieCleanLimit: config.TrieCleanCache, + TrieCleanNoPrefetch: config.NoPrefetch, + TrieDirtyLimit: config.TrieDirtyCache, + TrieDirtyDisabled: config.NoPruning, + TrieTimeLimit: config.TrieTimeout, + SnapshotLimit: config.SnapshotCache, + Preimages: config.Preimages, + StateHistory: config.StateHistory, + StateScheme: scheme, + TrieCommitInterval: config.TrieCommitInterval, + PathNodeBuffer: config.PathNodeBuffer, + ProposeBlockInterval: config.ProposeBlockInterval, } ) // Override the chain config with provided settings. 
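The hunks above thread the two new options from the CLI (`--pathdb.nodebuffer`, `--pathdb.proposeblock`) through `core.CacheConfig` and into the path database. A minimal sketch of the resulting wiring, assuming the defaults declared in cmd/utils/flags.go; `newPathCacheConfig` is a hypothetical helper, while the field and function names come from the patch:

```go
// Hedged sketch of the config wiring introduced above; values are illustrative.
func newPathCacheConfig() *core.CacheConfig {
	return &core.CacheConfig{
		StateScheme:          rawdb.PathScheme,
		PathNodeBuffer:       pathdb.GetNodeBufferType("async"), // "list", "sync" or "async"; unknown names warn and fall back to the list buffer
		ProposeBlockInterval: 3600,                              // should match the op-proposer interval (pathdb.DefaultProposeBlockInterval)
	}
}
```

`CacheConfig.triedbConfig()` then forwards `PathNodeBuffer` and `ProposeBlockInterval` into `pathdb.Config` as `TrieNodeBufferType` and `ProposeBlockInterval`, as shown in the core/blockchain.go hunk above.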
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 077ffd6e1b..30b2b848af 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) // FullNodeGPO contains default gasprice oracle settings for full node. @@ -110,8 +111,9 @@ type Config struct { // State scheme represents the scheme used to store ethereum states and trie // nodes on top. It can be 'hash', 'path', or none which means use the scheme // consistent with persistent state. - StateScheme string `toml:",omitempty"` - PathSyncFlush bool `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top + StateScheme string `toml:",omitempty"` + PathNodeBuffer pathdb.NodeBufferType `toml:",omitempty"` // Type of trienodebuffer to cache trie nodes in disklayer + ProposeBlockInterval uint64 `toml:",omitempty"` // Keep the same with op-proposer propose block interval // RequiredBlocks is a set of block number -> hash mappings which must be in the // canonical chain of all remote peers. Setting the option makes geth verify the diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index b006e034fd..f93e697f83 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/miner" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) // MarshalTOML marshals as TOML. @@ -28,7 +29,8 @@ func (c Config) MarshalTOML() (interface{}, error) { TransactionHistory uint64 `toml:",omitempty"` StateHistory uint64 `toml:",omitempty"` StateScheme string `toml:",omitempty"` - PathSyncFlush bool `toml:",omitempty"` + ProposeBlockInterval uint64 `toml:",omitempty"` + PathNodeBuffer pathdb.NodeBufferType `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` LightServ int `toml:",omitempty"` LightIngress int `toml:",omitempty"` @@ -79,7 +81,8 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.TransactionHistory = c.TransactionHistory enc.StateHistory = c.StateHistory enc.StateScheme = c.StateScheme - enc.PathSyncFlush = c.PathSyncFlush + enc.ProposeBlockInterval = c.ProposeBlockInterval + enc.PathNodeBuffer = c.PathNodeBuffer enc.RequiredBlocks = c.RequiredBlocks enc.LightServ = c.LightServ enc.LightIngress = c.LightIngress @@ -134,7 +137,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { TransactionHistory *uint64 `toml:",omitempty"` StateHistory *uint64 `toml:",omitempty"` StateScheme *string `toml:",omitempty"` - PathSyncFlush *bool `toml:",omitempty"` + ProposeBlockInterval *uint64 `toml:",omitempty"` + PathNodeBuffer *pathdb.NodeBufferType `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` LightServ *int `toml:",omitempty"` LightIngress *int `toml:",omitempty"` @@ -210,8 +214,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.StateScheme != nil { c.StateScheme = *dec.StateScheme } - if dec.PathSyncFlush != nil { - c.PathSyncFlush = *dec.PathSyncFlush + if dec.ProposeBlockInterval != nil { + c.ProposeBlockInterval = *dec.ProposeBlockInterval + } + if dec.PathNodeBuffer != nil { + c.PathNodeBuffer = *dec.PathNodeBuffer } if dec.RequiredBlocks != nil { c.RequiredBlocks = dec.RequiredBlocks diff --git a/go.mod 
b/go.mod index afb29570d3..1ce804bfa4 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/ethereum/go-ethereum -go 1.20 +go 1.21 + +//toolchain go1.22.0 require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 diff --git a/go.sum b/go.sum index 413bdc1848..bb9fdca4ef 100644 --- a/go.sum +++ b/go.sum @@ -51,9 +51,11 @@ github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuI github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= @@ -68,10 +70,13 @@ github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= @@ -189,6 +194,7 @@ github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= +github.com/btcsuite/btcd/btcutil v1.1.2/go.mod 
h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -233,6 +239,7 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= @@ -247,6 +254,7 @@ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1: github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= +github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= @@ -262,6 +270,7 @@ github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d h1:49RLWk1j44Xu4fjHb6JFYmeUnDORVwHNkDxaQ0ctCVU= +github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= github.com/cosmos/gogoproto v1.4.11 h1:LZcMHrx4FjUgrqQSWeaGC1v/TeuVFqSLa43CC6aWR2g= github.com/cosmos/gogoproto v1.4.11/go.mod h1:/g39Mh8m17X8Q/GDEs5zYTSNaNnInBSohtaxzQnYq1Y= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= @@ -273,6 +282,7 @@ github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbV github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= +github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= @@ -291,6 +301,7 @@ github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14y github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= @@ -302,6 +313,7 @@ github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlN github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.4-0.20210318174700-74754f61e018/go.mod h1:MIonLggsKgZLUSt414ExgwNtlOL5MuEoAJP514mwGe8= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= @@ -316,6 +328,7 @@ github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwu github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY= github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= @@ -409,6 +422,7 @@ github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclK github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -521,6 +535,7 @@ github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85q github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 
h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.1.1-0.20171103154506-982329095285/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -551,6 +566,7 @@ github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -606,6 +622,7 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.0.1/go.mod h1:oVMjMN64nzEcepv1kdZKg github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -618,6 +635,7 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= +github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= @@ -739,6 +757,7 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/joonix/log v0.0.0-20200409080653-9c1d2ceb5f1d/go.mod h1:fS54ONkjDV71zS9CDx3V9K21gJg7byKSvI4ajuWFNJw= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -822,6 +841,7 @@ 
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7 github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= @@ -1004,6 +1024,7 @@ github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjK github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= @@ -1131,6 +1152,7 @@ github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je4 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw= @@ -1169,6 +1191,7 @@ github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1181,6 +1204,7 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod 
h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= @@ -1452,6 +1476,7 @@ github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1960,7 +1985,9 @@ google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210426193834-eac7f76ac494/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.2.1-0.20170921194603-d4b75ebd4f9f/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= diff --git a/tests/block_test_util.go b/tests/block_test_util.go index ad1d34fb2b..e2afa0608c 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -130,6 +130,7 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, tracer vm.EVMLogger) er if err != nil { return err } + triedb.Journal(triedb.Head()) triedb.Close() // close the db to prevent memory leak if gblock.Hash() != t.json.Genesis.Hash { diff --git a/tests/state_test.go b/tests/state_test.go index 42b46e9c40..632ad574ec 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -224,7 +224,10 @@ func runBenchmark(b *testing.B, t *StateTest) { vmconfig.ExtraEips = eips block := t.genesis(config).ToBlock() triedb, _, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, false, rawdb.HashScheme) - defer triedb.Close() + defer func() { + triedb.Journal(triedb.Head()) + triedb.Close() + }() var baseFee *big.Int if rules.IsLondon { diff --git a/tests/state_test_util.go b/tests/state_test_util.go index d09b29b762..7fd97b917c 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -198,6 +198,7 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bo postCheck(result, snaps, statedb) if triedb != nil { + triedb.Journal(triedb.Head()) triedb.Close() } }() @@ 
-247,6 +248,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh post := t.json.Post[subtest.Fork][subtest.Index] msg, err := t.json.Tx.toMessage(post, baseFee) if err != nil { + triedb.Journal(triedb.Head()) triedb.Close() return nil, nil, nil, common.Hash{}, err } @@ -256,11 +258,13 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh var ttx types.Transaction err := ttx.UnmarshalBinary(post.TxBytes) if err != nil { + triedb.Journal(triedb.Head()) triedb.Close() return nil, nil, nil, common.Hash{}, err } if _, err := types.Sender(types.LatestSigner(config), &ttx); err != nil { + triedb.Journal(triedb.Head()) triedb.Close() return nil, nil, nil, common.Hash{}, err } diff --git a/trie/triedb/pathdb/asyncnodebuffer.go b/trie/triedb/pathdb/asyncnodebuffer.go index a94fab4461..4e41e8173c 100644 --- a/trie/triedb/pathdb/asyncnodebuffer.go +++ b/trie/triedb/pathdb/asyncnodebuffer.go @@ -41,6 +41,7 @@ func newAsyncNodeBuffer(limit int, nodes map[common.Hash]map[string]*trienode.No } } + log.Info("new async node buffer", "limit", common.StorageSize(limit), "layers", layers) return &asyncnodebuffer{ current: newNodeCache(uint64(limit), size, nodes, layers), background: newNodeCache(uint64(limit), 0, make(map[common.Hash]map[string]*trienode.Node), 0), @@ -66,7 +67,7 @@ func (a *asyncnodebuffer) node(owner common.Hash, path []byte, hash common.Hash) // the ownership of the nodes map which belongs to the bottom-most diff layer. // It will just hold the node references from the given map which are safe to // copy. -func (a *asyncnodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) trienodebuffer { +func (a *asyncnodebuffer) commit(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node) trienodebuffer { a.mux.Lock() defer a.mux.Unlock() @@ -164,6 +165,7 @@ func (a *asyncnodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, return nil } +// waitAndStopFlushing will block unit writing the trie nodes of trienodebuffer to disk. func (a *asyncnodebuffer) waitAndStopFlushing() { a.stopFlushing.Store(true) for a.isFlushing.Load() { @@ -172,6 +174,7 @@ func (a *asyncnodebuffer) waitAndStopFlushing() { } } +// getAllNodes return all the trie nodes are cached in trienodebuffer. func (a *asyncnodebuffer) getAllNodes() map[common.Hash]map[string]*trienode.Node { a.mux.Lock() defer a.mux.Unlock() @@ -183,6 +186,7 @@ func (a *asyncnodebuffer) getAllNodes() map[common.Hash]map[string]*trienode.Nod return cached.nodes } +// getLayers return the size of cached difflayers. func (a *asyncnodebuffer) getLayers() uint64 { a.mux.RLock() defer a.mux.RUnlock() @@ -190,6 +194,7 @@ func (a *asyncnodebuffer) getLayers() uint64 { return a.current.layers + a.background.layers } +// getSize return the trienodebuffer used size. func (a *asyncnodebuffer) getSize() (uint64, uint64) { a.mux.RLock() defer a.mux.RUnlock() @@ -197,6 +202,17 @@ func (a *asyncnodebuffer) getSize() (uint64, uint64) { return a.current.size, a.background.size } +// setClean set fastcache to trienodebuffer for cache the trie nodes, +// used for nodebufferlist. +func (a *asyncnodebuffer) setClean(clean *fastcache.Cache) { + return +} + +// proposedBlockReader return the world state Reader of block that is proposed to L1. 
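The test changes above all repeat the same shutdown order: journal the state at the current head before closing, so whatever the (now asynchronous or list-based) node buffer still holds is captured deterministically. A hedged sketch of that pattern as a helper, assuming the `Journal`/`Head` accessors the tests rely on; `closeTrieDB` itself is hypothetical and not part of the patch:

```go
// closeTrieDB captures the shutdown order used by the test changes above:
// journal the current head so buffered trie nodes survive, then close.
func closeTrieDB(triedb *trie.Database) error {
	if err := triedb.Journal(triedb.Head()); err != nil {
		return err
	}
	triedb.Close()
	return nil
}
```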
+func (a *asyncnodebuffer) proposedBlockReader(blockRoot common.Hash) (layer, error) { + return nil, errors.New("async node buffer not support to get proposed block reader") +} + type nodecache struct { layers uint64 // The number of diff layers aggregated inside size uint64 // The size of aggregated writes diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go index 8f330e988c..1d22189068 100644 --- a/trie/triedb/pathdb/database.go +++ b/trie/triedb/pathdb/database.go @@ -94,11 +94,12 @@ type layer interface { // Config contains the settings for database. type Config struct { - SyncFlush bool // Flag of trienodebuffer sync flush cache to disk - StateHistory uint64 // Number of recent blocks to maintain state history for - CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes - DirtyCacheSize int // Maximum memory allowance (in bytes) for caching dirty nodes - ReadOnly bool // Flag whether the database is opened in read only mode. + TrieNodeBufferType NodeBufferType // Type of trienodebuffer to cache trie nodes in disklayer + StateHistory uint64 // Number of recent blocks to maintain state history for + CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes + DirtyCacheSize int // Maximum memory allowance (in bytes) for caching dirty nodes + ReadOnly bool // Flag whether the database is opened in read only mode. + ProposeBlockInterval uint64 // Propose block to L1 block interval. } // sanitize checks the provided user configurations and changes anything that's @@ -106,6 +107,7 @@ type Config struct { func (c *Config) sanitize() *Config { conf := *c if conf.DirtyCacheSize > maxBufferSize { + conf.CleanCacheSize = conf.DirtyCacheSize - maxBufferSize log.Warn("Sanitizing invalid node buffer size", "provided", common.StorageSize(conf.DirtyCacheSize), "updated", common.StorageSize(maxBufferSize)) conf.DirtyCacheSize = maxBufferSize } @@ -195,7 +197,7 @@ func New(diskdb ethdb.Database, config *Config) *Database { log.Crit("Failed to disable database", "err", err) // impossible to happen } } - log.Warn("Path-based state scheme is an experimental feature", "sync", db.config.SyncFlush) + log.Warn("Path-based state scheme is an experimental feature") return db } @@ -203,6 +205,10 @@ func New(diskdb ethdb.Database, config *Config) *Database { func (db *Database) Reader(root common.Hash) (layer, error) { l := db.tree.get(root) if l == nil { + r, err := db.tree.bottom().buffer.proposedBlockReader(root) + if err == nil && r != nil { + return r, nil + } return nil, fmt.Errorf("state %#x is not available", root) } return l, nil @@ -312,7 +318,10 @@ func (db *Database) Enable(root common.Hash) error { } // Re-construct a new disk layer backed by persistent state // with **empty clean cache and node buffer**. - db.tree.reset(newDiskLayer(root, 0, db, nil, NewTrieNodeBuffer(db.config.SyncFlush, db.bufferSize, nil, 0))) + nb := NewTrieNodeBuffer(db.diskdb, db.config.TrieNodeBufferType, db.bufferSize, nil, 0, db.config.ProposeBlockInterval) + dl := newDiskLayer(root, 0, db, nil, nb) + nb.setClean(dl.cleans) + db.tree.reset(dl) // Re-enable the database as the final step. 
db.waitSync = false diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go index 912364f7f4..33896bfa1c 100644 --- a/trie/triedb/pathdb/database_test.go +++ b/trie/triedb/pathdb/database_test.go @@ -443,6 +443,8 @@ func TestDisable(t *testing.T) { tester := newTester(t) defer tester.release() + index := len(tester.roots)/2 + 1 + tester.db.Commit(tester.roots[index], false) _, stored := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil) if err := tester.db.Disable(); err != nil { t.Fatal("Failed to deactivate database") diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go index 82023c669a..5804cbcd11 100644 --- a/trie/triedb/pathdb/disklayer.go +++ b/trie/triedb/pathdb/disklayer.go @@ -43,7 +43,7 @@ type trienodebuffer interface { // the ownership of the nodes map which belongs to the bottom-most diff layer. // It will just hold the node references from the given map which are safe to // copy. - commit(nodes map[common.Hash]map[string]*trienode.Node) trienodebuffer + commit(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node) trienodebuffer // revert is the reverse operation of commit. It also merges the provided nodes // into the trienodebuffer, the difference is that the provided node set should @@ -75,15 +75,56 @@ type trienodebuffer interface { // waitAndStopFlushing will block unit writing the trie nodes of trienodebuffer to disk. waitAndStopFlushing() + + // setClean set fastcache to trienodebuffer for cache the trie nodes, used for nodebufferlist. + setClean(clean *fastcache.Cache) + + // proposedBlockReader return the world state Reader of block that is proposed to L1. + proposedBlockReader(blockRoot common.Hash) (layer, error) +} + +type NodeBufferType int32 + +const ( + AsyncNodeBuffer NodeBufferType = 0 + SyncNodeBuffer NodeBufferType = 1 + NodeBufferList NodeBufferType = 2 +) + +var ( + nodeBufferStringToType = map[string]NodeBufferType{ + "async": AsyncNodeBuffer, + "sync": SyncNodeBuffer, + "list": NodeBufferList, + } + + nodeBufferTypeToString = map[NodeBufferType]string{ + AsyncNodeBuffer: "async", + SyncNodeBuffer: "sync", + NodeBufferList: "list", + } +) + +func GetNodeBufferType(name string) NodeBufferType { + if _, ok := nodeBufferStringToType[name]; !ok { + log.Warn("node buffer type mismatch", "provide", name, "adjust to default", nodeBufferTypeToString[NodeBufferList]) + return NodeBufferList + } + return nodeBufferStringToType[name] } -func NewTrieNodeBuffer(sync bool, limit int, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) trienodebuffer { - if sync { - log.Info("New sync node buffer", "limit", common.StorageSize(limit), "layers", layers) +func NewTrieNodeBuffer(db ethdb.Database, trieNodeBufferType NodeBufferType, limit int, nodes map[common.Hash]map[string]*trienode.Node, layers, proposeBlockInterval uint64) trienodebuffer { + log.Info("init trie node buffer", "type", nodeBufferTypeToString[trieNodeBufferType]) + switch trieNodeBufferType { + case NodeBufferList: + return newNodeBufferList(db, uint64(limit), nodes, layers, proposeBlockInterval) + case AsyncNodeBuffer: + return newAsyncNodeBuffer(limit, nodes, layers) + case SyncNodeBuffer: return newNodeBuffer(limit, nodes, layers) + default: + return newAsyncNodeBuffer(limit, nodes, layers) } - log.Info("New async node buffer", "limit", common.StorageSize(limit), "layers", layers) - return newAsyncNodeBuffer(limit, nodes, layers) } // diskLayer is a low level persistent layer built on top of a 
key-value store. @@ -253,7 +294,7 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) { // diff layer, and flush the content in disk layer if there are too // many nodes cached. The clean cache is inherited from the original // disk layer for reusing. - ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, dl.buffer.commit(bottom.nodes)) + ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, dl.buffer.commit(bottom.root, bottom.id, bottom.block, bottom.nodes)) err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force) if err != nil { return nil, err diff --git a/trie/triedb/pathdb/errors.go b/trie/triedb/pathdb/errors.go index 46b986f884..6b9fa0317e 100644 --- a/trie/triedb/pathdb/errors.go +++ b/trie/triedb/pathdb/errors.go @@ -63,6 +63,9 @@ var ( // errRevertImmutable is returned if revert the background immutable nodecache errRevertImmutable = errors.New("revert immutable nodecache") + + // errNoProposedBlockDifflayer is returned if difflayers is not enough. + errNoProposedBlockDifflayer = errors.New("no proposed block difflayer") ) func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte, blob []byte) error { diff --git a/trie/triedb/pathdb/journal.go b/trie/triedb/pathdb/journal.go index f86d857560..98fe99390d 100644 --- a/trie/triedb/pathdb/journal.go +++ b/trie/triedb/pathdb/journal.go @@ -130,7 +130,10 @@ func (db *Database) loadLayers() layer { log.Info("Failed to load journal, discard it", "err", err) } // Return single layer with persistent state. - return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, NewTrieNodeBuffer(db.config.SyncFlush, db.bufferSize, nil, 0)) + nb := NewTrieNodeBuffer(db.diskdb, db.config.TrieNodeBufferType, db.bufferSize, nil, 0, db.config.ProposeBlockInterval) + dl := newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, nb) + nb.setClean(dl.cleans) + return dl } // loadDiskLayer reads the binary blob from the layer journal, reconstructing @@ -170,7 +173,9 @@ func (db *Database) loadDiskLayer(r *rlp.Stream) (layer, error) { nodes[entry.Owner] = subset } // Calculate the internal state transitions by id difference. 
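The bool-driven constructor is replaced by a factory keyed on `NodeBufferType`, and every call site (database.go `Enable`, journal.go `loadLayers`/`loadDiskLayer`) now hands the freshly built disk layer's clean cache back to the buffer via `setClean`, because the list-based buffer persists to disk on its own and needs that cache. A condensed sketch of the construction sequence those call sites share; `buildDiskLayer` is hypothetical, the identifiers it uses come from the patch:

```go
// Hedged sketch of the construct-then-setClean sequence used above.
func buildDiskLayer(db *Database, root common.Hash, id uint64,
	nodes map[common.Hash]map[string]*trienode.Node, layers uint64) *diskLayer {

	// Pick the buffer implementation from config ("list", "sync" or "async").
	nb := NewTrieNodeBuffer(db.diskdb, db.config.TrieNodeBufferType, db.bufferSize,
		nodes, layers, db.config.ProposeBlockInterval)

	dl := newDiskLayer(root, id, db, nil, nb)

	// Only nodebufferlist keeps this reference; the sync/async buffers ignore it.
	nb.setClean(dl.cleans)
	return dl
}
```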
- base := newDiskLayer(root, id, db, nil, NewTrieNodeBuffer(db.config.SyncFlush, db.bufferSize, nodes, id-stored)) + nb := NewTrieNodeBuffer(db.diskdb, db.config.TrieNodeBufferType, db.bufferSize, nodes, id-stored, db.config.ProposeBlockInterval) + base := newDiskLayer(root, id, db, nil, nb) + nb.setClean(base.cleans) return base, nil } diff --git a/trie/triedb/pathdb/metrics.go b/trie/triedb/pathdb/metrics.go index 9e2b1dcbf5..ee91e5dc20 100644 --- a/trie/triedb/pathdb/metrics.go +++ b/trie/triedb/pathdb/metrics.go @@ -47,4 +47,18 @@ var ( historyBuildTimeMeter = metrics.NewRegisteredTimer("pathdb/history/time", nil) historyDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil) historyIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/index", nil) + + // only for node buffer list + nodeBufferListSizeGauge = metrics.NewRegisteredGauge("pathdb/nodebufferlist/size", nil) + nodeBufferListCountGauge = metrics.NewRegisteredGauge("pathdb/nodebufferlist/count", nil) + nodeBufferListLayerGauge = metrics.NewRegisteredGauge("pathdb/nodebufferlist/layer", nil) + nodeBufferListPersistIDGauge = metrics.NewRegisteredGauge("pathdb/nodebufferlist/persistid", nil) + nodeBufferListLastBlockGauge = metrics.NewRegisteredGauge("pathdb/nodebufferlist/lastblock", nil) + nodeBufferListLastStateIdGauge = metrics.NewRegisteredGauge("pathdb/nodebufferlist/laststateid", nil) + nodeBufferListDifflayerAvgSize = metrics.NewRegisteredGauge("pathdb/nodebufferlist/difflayeravgsize", nil) + baseNodeBufferSizeGauge = metrics.NewRegisteredGauge("pathdb/basenodebuffer/size", nil) + baseNodeBufferLayerGauge = metrics.NewRegisteredGauge("pathdb/basenodebuffer/layer", nil) + baseNodeBufferDifflayerAvgSize = metrics.NewRegisteredGauge("pathdb/basenodebuffer/difflayeravgsize", nil) + proposedBlockReaderSuccess = metrics.NewRegisteredMeter("pathdb/nodebufferlist/proposedblockreader/success", nil) + proposedBlockReaderMismatch = metrics.NewRegisteredMeter("pathdb/nodebufferlist/proposedblockreader/mismatch", nil) ) diff --git a/trie/triedb/pathdb/nodebuffer.go b/trie/triedb/pathdb/nodebuffer.go index 4c4bc73ee3..4ecd83b4fb 100644 --- a/trie/triedb/pathdb/nodebuffer.go +++ b/trie/triedb/pathdb/nodebuffer.go @@ -17,6 +17,7 @@ package pathdb import ( + "errors" "fmt" "time" @@ -52,6 +53,8 @@ func newNodeBuffer(limit int, nodes map[common.Hash]map[string]*trienode.Node, l size += uint64(len(n.Blob) + len(path)) } } + + log.Info("new sync node buffer", "limit", common.StorageSize(limit), "layers", layers) return &nodebuffer{ layers: layers, nodes: nodes, @@ -82,7 +85,7 @@ func (b *nodebuffer) node(owner common.Hash, path []byte, hash common.Hash) (*tr // the ownership of the nodes map which belongs to the bottom-most diff layer. // It will just hold the node references from the given map which are safe to // copy. -func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) trienodebuffer { +func (b *nodebuffer) commit(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node) trienodebuffer { var ( delta int64 overwrite int64 @@ -291,4 +294,16 @@ func (b *nodebuffer) getLayers() uint64 { return b.layers } +// waitAndStopFlushing will block unit writing the trie nodes of trienodebuffer to disk. func (b *nodebuffer) waitAndStopFlushing() {} + +// setClean set fastcache to trienodebuffer for cache the trie nodes, +// used for nodebufferlist. 
+func (b *nodebuffer) setClean(clean *fastcache.Cache) { + return +} + +// proposedBlockReader return the world state Reader of block that is proposed to L1. +func (b *nodebuffer) proposedBlockReader(blockRoot common.Hash) (layer, error) { + return nil, errors.New("anode buffer not support to get proposed block reader") +} diff --git a/trie/triedb/pathdb/nodebufferlist.go b/trie/triedb/pathdb/nodebufferlist.go new file mode 100644 index 0000000000..2a344003be --- /dev/null +++ b/trie/triedb/pathdb/nodebufferlist.go @@ -0,0 +1,866 @@ +package pathdb + +import ( + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + "time" + + "github.com/VictoriaMetrics/fastcache" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" +) + +const ( + // mergeMultiDifflayerInterval defines the interval to collect nodes to flush disk. + mergeMultiDifflayerInterval = 3 + + // DefaultProposeBlockInterval defines the interval of op-proposer proposes block. + DefaultProposeBlockInterval = 3600 + + // DefaultReserveMultiDifflayerNumber defines the default reserve number of multiDifflayer in nodebufferlist. + DefaultReserveMultiDifflayerNumber = 3 +) + +var _ trienodebuffer = &nodebufferlist{} + +// nodebufferlist implements the trienodebuffer interface, it is designed to meet +// the withdraw proof function of opBNB at the storage layer while taking into +// account high performance. It is a multiDifflayer based queue that stores +// mergeBlockInterval compressed block difflayers per multiDifflayer. It also has +// one base multiDifflayer that collects the list's trie nodes to write disk. +type nodebufferlist struct { + db ethdb.Database // Persistent storage for matured trie nodes. + clean *fastcache.Cache // GC friendly memory cache of clean node RLPs. + wpBlocks uint64 // Propose block to L1 block interval. + rsevMdNum uint64 // Reserve number of multiDifflayer in nodebufferlist. + dlInMd uint64 // Difflayer number in multiDifflayer. + + limit uint64 // The maximum memory allowance in bytes for base multiDifflayer. + block uint64 // Corresponding last update block number. + stateId uint64 // Corresponding last update state id. + size uint64 // Size of nodebufferlist + count uint64 // Count of multiDifflayer in nodebufferlist + layers uint64 // Layers in nodebufferlist + head *multiDifflayer // The first element of nodebufferlist. + tail *multiDifflayer // The last element of nodebufferlist. + mux sync.RWMutex + + base *multiDifflayer // Collect the nodes of nodebufferlist and write to disk. + persistID uint64 // The last state id that have written to disk. + baseMux sync.RWMutex // The mutex of base multiDifflayer and persistID. + flushMux sync.RWMutex // The mutex of flushing base multiDifflayer for reorg corner case. + + isFlushing atomic.Bool // Flag indicates writing disk under background. + stopFlushing atomic.Bool // Flag stops writing disk under background. 
+ stopCh chan struct{} +} + +// newNodeBufferList initializes the node buffer list with the provided nodes +func newNodeBufferList( + db ethdb.Database, + limit uint64, + nodes map[common.Hash]map[string]*trienode.Node, + layers uint64, + proposeBlockInterval uint64) *nodebufferlist { + var ( + rsevMdNum uint64 + dlInMd uint64 + wpBlocks = proposeBlockInterval + ) + if wpBlocks == 0 { + rsevMdNum = DefaultReserveMultiDifflayerNumber + wpBlocks = DefaultProposeBlockInterval + dlInMd = DefaultProposeBlockInterval / (DefaultReserveMultiDifflayerNumber - 1) + } else if wpBlocks%(DefaultReserveMultiDifflayerNumber-1) == 0 { + rsevMdNum = DefaultReserveMultiDifflayerNumber + dlInMd = wpBlocks / (DefaultReserveMultiDifflayerNumber - 1) + } else { + rsevMdNum = 1 + dlInMd = wpBlocks + } + + if nodes == nil { + nodes = make(map[common.Hash]map[string]*trienode.Node) + } + var size uint64 + for _, subset := range nodes { + for path, n := range subset { + size += uint64(len(n.Blob) + len(path)) + } + } + base := newMultiDifflayer(limit, size, common.Hash{}, nodes, layers) + ele := newMultiDifflayer(limit, 0, common.Hash{}, make(map[common.Hash]map[string]*trienode.Node), 0) + nf := &nodebufferlist{ + db: db, + wpBlocks: wpBlocks, + rsevMdNum: rsevMdNum, + dlInMd: dlInMd, + limit: limit, + base: base, + head: ele, + tail: ele, + count: 1, + persistID: rawdb.ReadPersistentStateID(db), + stopCh: make(chan struct{}), + } + go nf.loop() + + log.Info("new node buffer list", "proposed block interval", nf.wpBlocks, + "reserve multi difflayers", nf.rsevMdNum, "difflayers in multidifflayer", nf.dlInMd, + "limit", common.StorageSize(limit), "layers", layers, "persist id", nf.persistID) + return nf +} + +// node retrieves the trie node with given node info. +func (nf *nodebufferlist) node(owner common.Hash, path []byte, hash common.Hash) (node *trienode.Node, err error) { + nf.mux.RLock() + find := func(nc *multiDifflayer) bool { + subset, ok := nc.nodes[owner] + if !ok { + return true + } + n, ok := subset[string(path)] + if !ok { + return true + } + if n.Hash != hash { + log.Error("Unexpected trie node in node buffer list", "owner", owner, "path", path, "expect", hash, "got", n.Hash) + err = newUnexpectedNodeError("dirty", hash, n.Hash, owner, path, n.Blob) + return false + } + node = n + return false + } + nf.traverse(find) + if err != nil { + nf.mux.RUnlock() + return nil, err + } + if node != nil { + nf.mux.RUnlock() + return node, nil + } + nf.mux.RUnlock() + + nf.baseMux.RLock() + node, err = nf.base.node(owner, path, hash) + nf.baseMux.RUnlock() + return node, err +} + +// commit merges the dirty nodes into the trienodebuffer. This operation won't take +// the ownership of the nodes map which belongs to the bottom-most diff layer. +// It will just hold the node references from the given map which are safe to +// copy. 
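The constructor above splits the propose interval into fixed-size groups of block difflayers. A worked example of that arithmetic, using the defaults declared in this file, may make it easier to follow; `partition` is illustrative only and simply mirrors the branches in `newNodeBufferList`:

```go
// Hedged sketch of the partitioning rule in newNodeBufferList above.
// With the defaults (proposeBlockInterval = 3600, reserve = 3), each
// multiDifflayer aggregates 3600/(3-1) = 1800 block difflayers, and at least
// 3 multiDifflayers stay in the list before anything is merged into base.
func partition(proposeBlockInterval uint64) (rsevMdNum, wpBlocks, dlInMd uint64) {
	const reserve = DefaultReserveMultiDifflayerNumber // 3
	switch {
	case proposeBlockInterval == 0:
		return reserve, DefaultProposeBlockInterval, DefaultProposeBlockInterval / (reserve - 1)
	case proposeBlockInterval%(reserve-1) == 0:
		return reserve, proposeBlockInterval, proposeBlockInterval / (reserve - 1)
	default:
		// Intervals that do not divide evenly fall back to a single reserved group.
		return 1, proposeBlockInterval, proposeBlockInterval
	}
}
```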
+func (nf *nodebufferlist) commit(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node) trienodebuffer { + nf.mux.Lock() + defer nf.mux.Unlock() + + if nf.head == nil { + nf.head = newMultiDifflayer(nf.limit, 0, common.Hash{}, make(map[common.Hash]map[string]*trienode.Node), 0) + nf.tail = nf.head + } + oldSize := nf.head.size + err := nf.head.commit(root, id, block, 1, nodes) + if err != nil { + log.Crit("failed to commit nodes to node buffer list", "error", err) + } + + nf.stateId = id + nf.block = block + nf.size = nf.size + nf.head.size - oldSize + nf.layers++ + + nodeBufferListSizeGauge.Update(int64(nf.size)) + nodeBufferListLayerGauge.Update(int64(nf.layers)) + nodeBufferListLastStateIdGauge.Update(int64(nf.stateId)) + nodeBufferListLastBlockGauge.Update(int64(nf.block)) + + if block != 0 && block%nf.dlInMd == 0 { + nc := newMultiDifflayer(nf.limit, 0, common.Hash{}, make(map[common.Hash]map[string]*trienode.Node), 0) + nf.pushFront(nc) + } + return nf +} + +// revert is the reverse operation of commit. It also merges the provided nodes +// into the trienodebuffer, the difference is that the provided node set should +// revert the changes made by the last state transition. +func (nf *nodebufferlist) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error { + // hang user read/write and background write, + nf.mux.Lock() + nf.baseMux.Lock() + nf.flushMux.Lock() + defer nf.mux.Unlock() + defer nf.baseMux.Unlock() + defer nf.flushMux.Unlock() + + merge := func(buffer *multiDifflayer) bool { + if err := nf.base.commit(buffer.root, buffer.id, buffer.block, buffer.layers, buffer.nodes); err != nil { + log.Crit("failed to commit nodes to base node buffer", "error", err) + } + _ = nf.popBack() + return true + } + nf.traverseReverse(merge) + nc := newMultiDifflayer(nf.limit, 0, common.Hash{}, make(map[common.Hash]map[string]*trienode.Node), 0) + nf.head = nc + nf.tail = nc + nf.size = 0 + nf.layers = 0 + nf.count = 1 + return nf.base.revert(nf.db, nodes) +} + +// flush persists the in-memory dirty trie node into the disk if the configured +// memory threshold is reached. Note, all data must be written atomically. 
+func (nf *nodebufferlist) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64, force bool) error { + if nf.clean == nil { + nf.clean = clean + } + if !force { + return nil + } + + // hang user read/write and background write + nf.mux.Lock() + nf.baseMux.Lock() + nf.flushMux.Lock() + defer nf.mux.Unlock() + defer nf.baseMux.Unlock() + defer nf.flushMux.Unlock() + + nf.stopFlushing.Store(true) + defer nf.stopFlushing.Store(false) + for { + if nf.isFlushing.Swap(true) { + time.Sleep(time.Duration(DefaultBackgroundFlushInterval) * time.Second) + log.Info("waiting base node buffer to be flushed to disk") + continue + } else { + break + } + } + + commitFunc := func(buffer *multiDifflayer) bool { + if err := nf.base.commit(buffer.root, buffer.id, buffer.block, buffer.layers, buffer.nodes); err != nil { + log.Crit("failed to commit nodes to base node buffer", "error", err) + } + _ = nf.popBack() + return true + } + nf.traverseReverse(commitFunc) + persistID := nf.persistID + nf.base.layers + err := nf.base.flush(nf.db, nf.clean, persistID) + if err != nil { + log.Crit("failed to flush base node buffer to disk", "error", err) + } + nf.isFlushing.Store(false) + nf.base.reset() + nf.persistID = persistID + return nil +} + +// setSize sets the buffer size to the provided number, and invokes a flush +// operation if the current memory usage exceeds the new limit. +func (nf *nodebufferlist) setSize(size int, db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error { + return errors.New("node buffer list not supported") +} + +// reset cleans up the disk cache. +func (nf *nodebufferlist) reset() { + nf.mux.Lock() + nf.baseMux.Lock() + defer nf.mux.Unlock() + defer nf.baseMux.Unlock() + + mf := newMultiDifflayer(nf.limit, 0, common.Hash{}, make(map[common.Hash]map[string]*trienode.Node), 0) + nf.head = mf + nf.tail = mf + nf.size = 0 + nf.count = 1 + nf.layers = 0 + nf.base.reset() +} + +// empty returns an indicator if trienodebuffer contains any state transition inside +func (nf *nodebufferlist) empty() bool { + return nf.getLayers() == 0 +} + +// getSize return the trienodebuffer used size. +func (nf *nodebufferlist) getSize() (uint64, uint64) { + // no lock, the return vals are used to log, not strictly correct + return nf.size, nf.base.size +} + +// getAllNodes return all the trie nodes are cached in trienodebuffer. +func (nf *nodebufferlist) getAllNodes() map[common.Hash]map[string]*trienode.Node { + nf.mux.Lock() + nf.baseMux.Lock() + defer nf.mux.Unlock() + defer nf.baseMux.Unlock() + + nc := newMultiDifflayer(nf.limit, 0, common.Hash{}, make(map[common.Hash]map[string]*trienode.Node), 0) + if err := nc.commit(nf.base.root, nf.base.id, nf.base.block, nf.layers, nf.base.nodes); err != nil { + log.Crit("failed to commit nodes to node buffer", "error", err) + } + merge := func(buffer *multiDifflayer) bool { + if err := nc.commit(buffer.root, buffer.id, buffer.block, buffer.layers, buffer.nodes); err != nil { + log.Crit("failed to commit nodes to node buffer", "error", err) + } + return true + } + nf.traverseReverse(merge) + return nc.nodes +} + +// getLayers return the size of cached difflayers. +func (nf *nodebufferlist) getLayers() uint64 { + nf.mux.RLock() + nf.baseMux.RLock() + defer nf.mux.RUnlock() + defer nf.baseMux.RUnlock() + + return nf.layers + nf.base.layers +} + +// waitAndStopFlushing will block unit writing the trie nodes of trienodebuffer to disk. 
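Note that `nodebufferlist.flush` above is effectively a no-op for the per-block `force=false` calls made from `diskLayer.commit`; persistence happens either in the background loop or when a forced flush arrives at shutdown. A minimal sketch of that shutdown ordering, under the assumption that callers follow the same sequence the pathdb code uses; `shutdown` is a hypothetical wrapper, not code from the patch:

```go
// Hedged sketch of the shutdown ordering implied above.
func shutdown(buf trienodebuffer, db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error {
	// Stop the background merge/flush loop and wait for any in-flight write.
	buf.waitAndStopFlushing()
	// Force the remaining contents to disk; non-forced flushes are ignored by nodebufferlist.
	return buf.flush(db, clean, id, true)
}
```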
+
+// waitAndStopFlushing blocks until the trie nodes of the trienodebuffer have been written to disk.
+func (nf *nodebufferlist) waitAndStopFlushing() {
+    close(nf.stopCh)
+    nf.stopFlushing.Store(true)
+    for nf.isFlushing.Load() {
+        time.Sleep(time.Second)
+        log.Warn("waiting for background node buffer to be flushed to disk")
+    }
+}
+
+// setClean sets the fastcache used by the trienodebuffer to cache clean trie nodes.
+func (nf *nodebufferlist) setClean(clean *fastcache.Cache) {
+    nf.clean = clean
+}
+
+// pushFront pushes the cache to the head of the nodebufferlist.
+func (nf *nodebufferlist) pushFront(cache *multiDifflayer) {
+    if cache == nil {
+        return
+    }
+    if nf.head == nil {
+        nf.head = cache
+        nf.tail = cache
+        cache.next = nil
+        cache.pre = nil
+        return
+    }
+    cache.pre = nil
+    cache.next = nf.head
+    nf.head.pre = cache
+    nf.head = cache
+
+    nf.size += cache.size
+    nf.layers += cache.layers
+    nf.count++
+}
+
+// popBack pops the tail element off the nodebufferlist.
+func (nf *nodebufferlist) popBack() *multiDifflayer {
+    if nf.tail == nil {
+        return nil
+    }
+    if nf.head == nf.tail {
+        nf.head = nil
+        nf.tail = nil
+        return nil
+    }
+    tag := nf.tail
+    nf.tail = nf.tail.pre
+    if nf.tail != nil {
+        nf.tail.next = nil
+    }
+
+    if tag.size > nf.size {
+        log.Warn("node buffer list size underflow", "size", nf.size, "popped", tag.size)
+        nf.size = 0
+    } else {
+        nf.size -= tag.size
+    }
+    if tag.layers > nf.layers {
+        log.Warn("node buffer list layers underflow", "layers", nf.layers, "popped", tag.layers)
+        nf.layers = 0
+    } else {
+        nf.layers -= tag.layers
+    }
+    if nf.count == 0 {
+        log.Warn("node buffer list count underflow")
+    } else {
+        nf.count--
+    }
+
+    return tag
+}
+
+// traverse iterates the nodebufferlist from head to tail and calls cb on each
+// element, stopping early if cb returns false.
+func (nf *nodebufferlist) traverse(cb func(*multiDifflayer) bool) {
+    cursor := nf.head
+    for cursor != nil {
+        next := cursor.next
+        if !cb(cursor) {
+            break
+        }
+        cursor = next
+    }
+}
+
+// traverseReverse iterates the nodebufferlist from tail to head and calls cb on
+// each element, stopping early if cb returns false.
+func (nf *nodebufferlist) traverseReverse(cb func(*multiDifflayer) bool) {
+    cursor := nf.tail
+    for cursor != nil {
+        pre := cursor.pre
+        if !cb(cursor) {
+            break
+        }
+        cursor = pre
+    }
+}
+
+// diffToBase traverses the nodebufferlist from the tail and merges each
+// multiDifflayer's nodes into the base node buffer; once the base buffer
+// reaches its size limit it is flushed to disk.
It is called +// periodically in the background +func (nf *nodebufferlist) diffToBase() { + commitFunc := func(buffer *multiDifflayer) bool { + if nf.base.size >= nf.base.limit { + log.Debug("base node buffer need write disk immediately") + return false + } + if nf.count <= nf.rsevMdNum { + log.Debug("node buffer list less, waiting more difflayer to be committed") + return false + } + if buffer.block%nf.dlInMd != 0 { + log.Crit("committed block number misaligned", "block", buffer.block) + } + + nf.baseMux.Lock() + err := nf.base.commit(buffer.root, buffer.id, buffer.block, buffer.layers, buffer.nodes) + nf.baseMux.Unlock() + if err != nil { + log.Error("failed to commit nodes to base node buffer", "error", err) + return false + } + + nf.mux.Lock() + _ = nf.popBack() + nodeBufferListSizeGauge.Update(int64(nf.size)) + nodeBufferListCountGauge.Update(int64(nf.count)) + nodeBufferListLayerGauge.Update(int64(nf.layers)) + if nf.layers > 0 { + nodeBufferListDifflayerAvgSize.Update(int64(nf.size / nf.layers)) + } + nf.mux.Unlock() + baseNodeBufferSizeGauge.Update(int64(nf.base.size)) + baseNodeBufferLayerGauge.Update(int64(nf.base.layers)) + if nf.base.layers > 0 { + baseNodeBufferDifflayerAvgSize.Update(int64(nf.base.size / nf.base.layers)) + } + nf.report() + + return true + } + nf.traverseReverse(commitFunc) +} + +// backgroundFlush flush base node buffer to disk. +func (nf *nodebufferlist) backgroundFlush() { + nf.flushMux.Lock() + defer nf.flushMux.Unlock() + nf.baseMux.RLock() + persistID := nf.persistID + nf.base.layers + nf.baseMux.RUnlock() + err := nf.base.flush(nf.db, nf.clean, persistID) + if err != nil { + log.Error("failed to flush base node buffer to disk", "error", err) + return + } + nf.baseMux.Lock() + nf.base.reset() + nf.persistID = persistID + nf.baseMux.Unlock() + + baseNodeBufferSizeGauge.Update(int64(nf.base.size)) + baseNodeBufferLayerGauge.Update(int64(nf.base.layers)) + nodeBufferListPersistIDGauge.Update(int64(nf.persistID)) +} + +// loop runs the background task, collects the nodes for writing to disk. +func (nf *nodebufferlist) loop() { + mergeTicker := time.NewTicker(time.Second * mergeMultiDifflayerInterval) + for { + select { + case <-nf.stopCh: + return + case <-mergeTicker.C: + if nf.stopFlushing.Load() { + continue + } + if nf.isFlushing.Swap(true) { + continue + } + nf.diffToBase() + if nf.base.size > nf.base.limit { + nf.backgroundFlush() + } + nf.isFlushing.Swap(false) + } + } +} + +// proposedBlockReader return the world state Reader of block that is proposed to L1. +func (nf *nodebufferlist) proposedBlockReader(blockRoot common.Hash) (layer, error) { + nf.mux.RLock() + defer nf.mux.RUnlock() + + var diff *multiDifflayer + context := []interface{}{ + "root", blockRoot, + } + find := func(buffer *multiDifflayer) bool { + context = append(context, []interface{}{"multi_difflayer_number", buffer.block}...) + context = append(context, []interface{}{"multi_difflayer_root", buffer.root}...) + if buffer.block%nf.wpBlocks == 0 { + if buffer.root == blockRoot { + diff = buffer + return false + } + } + return true + } + nf.traverse(find) + if diff == nil { + proposedBlockReaderMismatch.Mark(1) + log.Debug("proposed block state is not available", context...) + return nil, fmt.Errorf("proposed block proof state %#x is not available", blockRoot) + } + proposedBlockReaderSuccess.Mark(1) + return &proposedBlockReader{ + nf: nf, + diff: diff, + }, nil +} + +// report logs the nodebufferlist info for monitor. 
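Taken together, commit and diffToBase treat the nodebufferlist as a doubly linked queue: fresh multiDifflayers are pushed at the head while the background loop drains the oldest ones from the tail into the base buffer. The sketch below shows only that shape; unlike the real popBack it also removes the final element and it skips the size/layer bookkeeping, and node/list are illustrative stand-ins for the pathdb types.

package main

import "fmt"

// node stands in for a multiDifflayer; only the links matter here.
type node struct {
    block     uint64
    pre, next *node
}

type list struct {
    head, tail *node
}

// pushFront inserts a new element at the head (the newest layer).
func (l *list) pushFront(n *node) {
    if l.head == nil {
        l.head, l.tail = n, n
        return
    }
    n.next = l.head
    l.head.pre = n
    l.head = n
}

// popBack removes and returns the tail element (the oldest layer).
func (l *list) popBack() *node {
    if l.tail == nil {
        return nil
    }
    n := l.tail
    l.tail = n.pre
    if l.tail == nil {
        l.head = nil
    } else {
        l.tail.next = nil
    }
    return n
}

func main() {
    l := &list{}
    for b := uint64(1); b <= 3; b++ {
        l.pushFront(&node{block: b}) // newest at the head
    }
    for n := l.popBack(); n != nil; n = l.popBack() {
        fmt.Println("merge into base:", n.block) // oldest drained first: 1, 2, 3
    }
}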
+func (nf *nodebufferlist) report() { + context := []interface{}{ + "number", nf.block, "count", nf.count, "layers", nf.layers, + "stateid", nf.stateId, "persist", nf.persistID, "size", common.StorageSize(nf.size), + "basesize", common.StorageSize(nf.base.size), "baselayers", nf.base.layers, + } + log.Info("node buffer list info", context...) +} + +var _ layer = &proposedBlockReader{} + +// proposedBlockReader implements the layer interface used to read the status of proposed +// blocks, which supports get withdrawal proof. It only needs to implement the Node function +// of the Reader interface. +type proposedBlockReader struct { + nf *nodebufferlist + diff *multiDifflayer +} + +func (w *proposedBlockReader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) { + w.nf.mux.RLock() + defer w.nf.mux.RUnlock() + + current := w.diff + for { + if current == nil { + break + } + node, err := current.node(owner, path, hash) + if err != nil { + return nil, err + } + if node != nil { + return node.Blob, nil + } + current = current.next + } + + node, err := w.nf.base.node(owner, path, hash) + if err != nil { + return nil, err + } + if node != nil { + return node.Blob, nil + } + + key := cacheKey(owner, path) + if w.nf.clean != nil { + if blob := w.nf.clean.Get(nil, key); len(blob) > 0 { + h := newHasher() + defer h.release() + + got := h.hash(blob) + if got == hash { + cleanHitMeter.Mark(1) + cleanReadMeter.Mark(int64(len(blob))) + return blob, nil + } + cleanFalseMeter.Mark(1) + log.Error("Unexpected trie node in clean cache", "owner", owner, "path", path, "expect", hash, "got", got) + } + cleanMissMeter.Mark(1) + } + + var ( + nBlob []byte + nHash common.Hash + ) + if owner == (common.Hash{}) { + nBlob, nHash = rawdb.ReadAccountTrieNode(w.nf.db, path) + } else { + nBlob, nHash = rawdb.ReadStorageTrieNode(w.nf.db, owner, path) + } + if nHash != hash { + diskFalseMeter.Mark(1) + log.Error("Unexpected trie node in disk", "owner", owner, "path", path, "expect", hash, "got", nHash) + return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path, nBlob) + } + if w.nf.clean != nil && len(nBlob) > 0 { + w.nf.clean.Set(key, nBlob) + cleanWriteMeter.Mark(int64(len(nBlob))) + } + return nBlob, nil +} +func (w *proposedBlockReader) rootHash() common.Hash { return w.diff.root } +func (w *proposedBlockReader) stateID() uint64 { + return w.diff.id +} +func (w *proposedBlockReader) parentLayer() layer { return nil } +func (w *proposedBlockReader) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer { + return nil +} +func (w *proposedBlockReader) journal(io.Writer) error { return nil } + +// multiDifflayer compresses several difflayers in one map. As an element of nodebufferlist +// it is the smallest unit for storing trie nodes. 
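proposedBlockReader.Node resolves a trie node by falling through the layers in age order: the multiDifflayer of the proposed block first, then the older in-memory layers reachable via next, then the base buffer, and finally persistent storage. The sketch below shows only that fall-through order; the clean-cache probe and the hash verification performed by the real reader are omitted, and the types are illustrative.

package main

import "fmt"

// kvLayer stands in for a multiDifflayer holding a flat key/value map.
type kvLayer struct {
    nodes map[string][]byte
    older *kvLayer // corresponds to multiDifflayer.next (towards the tail)
}

// lookup walks the in-memory layers from the proposed block towards older
// ones, then the base buffer, then the persistent store.
func lookup(start *kvLayer, base, disk map[string][]byte, key string) ([]byte, string) {
    for l := start; l != nil; l = l.older {
        if blob, ok := l.nodes[key]; ok {
            return blob, "difflayer"
        }
    }
    if blob, ok := base[key]; ok {
        return blob, "base buffer"
    }
    return disk[key], "disk"
}

func main() {
    disk := map[string][]byte{"a": []byte("v0")}
    base := map[string][]byte{"b": []byte("v1")}
    older := &kvLayer{nodes: map[string][]byte{"c": []byte("v2")}}
    start := &kvLayer{nodes: map[string][]byte{"d": []byte("v3")}, older: older}

    for _, k := range []string{"d", "c", "b", "a"} {
        v, src := lookup(start, base, disk, k)
        fmt.Printf("%s -> %s (from %s)\n", k, v, src)
    }
}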
+type multiDifflayer struct { + root common.Hash // Corresponding last root hash to which this layer diff belongs to + id uint64 // Corresponding last update state id + block uint64 // Corresponding last update block number + layers uint64 // The number of diff layers aggregated inside + size uint64 // The size of aggregated writes + limit uint64 // The maximum memory allowance in bytes + nodes map[common.Hash]map[string]*trienode.Node // The dirty node set, mapped by owner and path + + pre *multiDifflayer + next *multiDifflayer +} + +// newMultiDifflayer initializes the multiDifflayer with the provided nodes +func newMultiDifflayer(limit, size uint64, root common.Hash, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) *multiDifflayer { + return &multiDifflayer{ + root: root, + layers: layers, + size: size, + limit: limit, + nodes: nodes, + } +} + +// node retrieves the trie node with given node info. +func (mf *multiDifflayer) node(owner common.Hash, path []byte, hash common.Hash) (*trienode.Node, error) { + subset, ok := mf.nodes[owner] + if !ok { + return nil, nil + } + n, ok := subset[string(path)] + if !ok { + return nil, nil + } + if n.Hash != hash { + dirtyFalseMeter.Mark(1) + log.Error("Unexpected trie node in async node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash) + return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path, n.Blob) + } + return n, nil +} + +// commit merges the dirty nodes into the newMultiDifflayer. This operation won't +// take the ownership of the nodes map which belongs to the bottom-most diff layer. +// It will just hold the node references from the given map which are safe to copy. +func (mf *multiDifflayer) commit(root common.Hash, id uint64, block uint64, layers uint64, nodes map[common.Hash]map[string]*trienode.Node) error { + if mf.id != 0 && mf.id >= id { + log.Warn("state id out of order", "pre_stateId", mf.id, "capping_stateId", id) + } + if mf.block != 0 && mf.block >= block { + log.Warn("block number out of order", "pre_block", mf.block, "capping_block", block) + } + + mf.root = root + mf.block = block + mf.id = id + + var ( + delta int64 + overwrite int64 + overwriteSize int64 + ) + for owner, subset := range nodes { + current, exist := mf.nodes[owner] + if !exist { + // Allocate a new map for the subset instead of claiming it directly + // from the passed map to avoid potential concurrent map read/write. + // The nodes belong to original diff layer are still accessible even + // after merging, thus the ownership of nodes map should still belong + // to original layer and any mutation on it should be prevented. + current = make(map[string]*trienode.Node) + for path, n := range subset { + current[path] = n + delta += int64(len(n.Blob) + len(path)) + } + mf.nodes[owner] = current + continue + } + for path, n := range subset { + if orig, exist := current[path]; !exist { + delta += int64(len(n.Blob) + len(path)) + } else { + delta += int64(len(n.Blob) - len(orig.Blob)) + overwrite++ + overwriteSize += int64(len(orig.Blob) + len(path)) + } + current[path] = n + } + mf.nodes[owner] = current + } + mf.updateSize(delta) + mf.layers += layers + gcNodesMeter.Mark(overwrite) + gcBytesMeter.Mark(overwriteSize) + return nil +} + +// updateSize updates the size of newMultiDifflayer. 
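The merge loop in multiDifflayer.commit has two jobs: copy incoming subsets into freshly allocated maps so the caller's maps are never aliased, and track the size delta, counting the full key and blob size for new entries but only the blob-size difference for overwrites. Below is a simplified sketch of that accounting with plain string keys and byte slices instead of trienode values; it is illustrative, not the pathdb code.

package main

import "fmt"

// merge copies src into dst without aliasing the caller's maps and returns
// the resulting size delta, distinguishing new entries from overwrites.
func merge(dst, src map[string]map[string][]byte) (delta int64) {
    for owner, subset := range src {
        current, ok := dst[owner]
        if !ok {
            // Copy into a new map instead of claiming the caller's map.
            current = make(map[string][]byte, len(subset))
            for path, blob := range subset {
                current[path] = blob
                delta += int64(len(blob) + len(path))
            }
            dst[owner] = current
            continue
        }
        for path, blob := range subset {
            if orig, exists := current[path]; exists {
                delta += int64(len(blob) - len(orig)) // overwrite: only the size change counts
            } else {
                delta += int64(len(blob) + len(path))
            }
            current[path] = blob
        }
    }
    return delta
}

func main() {
    dst := map[string]map[string][]byte{"acct": {"0x01": []byte("old")}}
    src := map[string]map[string][]byte{"acct": {"0x01": []byte("newer"), "0x02": []byte("v")}}
    fmt.Println("delta:", merge(dst, src)) // (5-3) + (1+4) = 7
}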
+func (mf *multiDifflayer) updateSize(delta int64) { + size := int64(mf.size) + delta + if size >= 0 { + mf.size = uint64(size) + return + } + s := mf.size + mf.size = 0 + log.Warn("Invalid pathdb buffer size", "prev", common.StorageSize(s), "delta", common.StorageSize(delta)) +} + +// reset clears the newMultiDifflayer. +func (mf *multiDifflayer) reset() { + mf.root = common.Hash{} + mf.id = 0 + mf.block = 0 + mf.layers = 0 + mf.size = 0 + mf.pre = nil + mf.next = nil + mf.nodes = make(map[common.Hash]map[string]*trienode.Node) +} + +// empty returns an indicator if multiDifflayer contains any state transition inside. +func (mf *multiDifflayer) empty() bool { + return mf.layers == 0 +} + +// flush persists the in-memory dirty trie node into the disk if the configured +// memory threshold is reached. Note, all data must be written atomically. +func (mf *multiDifflayer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error { + // Ensure the target state id is aligned with the internal counter. + head := rawdb.ReadPersistentStateID(db) + if head+mf.layers != id { + return fmt.Errorf("buffer layers (%d) cannot be applied on top of persisted state id (%d) to reach requested state id (%d)", mf.layers, head, id) + } + var ( + start = time.Now() + batch = db.NewBatchWithSize(int(float64(mf.size) * DefaultBatchRedundancyRate)) + ) + nodes := writeNodes(batch, mf.nodes, clean) + rawdb.WritePersistentStateID(batch, id) + + // Flush all mutations in a single batch + size := batch.ValueSize() + if err := batch.Write(); err != nil { + return err + } + commitBytesMeter.Mark(int64(size)) + commitNodesMeter.Mark(int64(nodes)) + commitTimeTimer.UpdateSince(start) + log.Debug("Persisted pathdb nodes", "nodes", len(mf.nodes), "bytes", common.StorageSize(size), "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +// revert is the reverse operation of commit. It also merges the provided nodes +// into the multiDifflayer, the difference is that the provided node set should +// revert the changes made by the last state transition. +func (mf *multiDifflayer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error { + // Short circuit if no embedded state transition to revert. + if mf.layers == 0 { + return errStateUnrecoverable + } + mf.layers-- + + // Reset the entire buffer if only a single transition left. + if mf.layers == 0 { + mf.reset() + return nil + } + var delta int64 + for owner, subset := range nodes { + current, ok := mf.nodes[owner] + if !ok { + panic(fmt.Sprintf("non-existent subset (%x)", owner)) + } + for path, n := range subset { + orig, ok := current[path] + if !ok { + // There is a special case in MPT that one child is removed from + // a fullNode which only has two children, and then a new child + // with different position is immediately inserted into the fullNode. + // In this case, the clean child of the fullNode will also be + // marked as dirty because of node collapse and expansion. + // + // In case of database rollback, don't panic if this "clean" + // node occurs which is not present in buffer. + var nhash common.Hash + if owner == (common.Hash{}) { + _, nhash = rawdb.ReadAccountTrieNode(db, []byte(path)) + } else { + _, nhash = rawdb.ReadStorageTrieNode(db, owner, []byte(path)) + } + // Ignore the clean node in the case described above. 
+ if nhash == n.Hash { + continue + } + panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex())) + } + current[path] = n + delta += int64(len(n.Blob)) - int64(len(orig.Blob)) + } + } + mf.updateSize(delta) + return nil +}
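Finally, note the invariant checked at the top of multiDifflayer.flush: the persistent state id already on disk plus the number of layers aggregated in the buffer must equal the id being flushed, and a successful flush advances the persisted id by exactly that many layers. A toy illustration of the check, with made-up values:

package main

import "fmt"

// checkAndAdvance mirrors the alignment check in multiDifflayer.flush: it
// returns the new persisted state id on success, or an error if the buffered
// layers cannot bridge the gap between the persisted and requested ids.
func checkAndAdvance(persisted, bufferedLayers, target uint64) (uint64, error) {
    if persisted+bufferedLayers != target {
        return persisted, fmt.Errorf("buffer layers (%d) cannot be applied on top of persisted state id (%d) to reach requested state id (%d)",
            bufferedLayers, persisted, target)
    }
    return target, nil
}

func main() {
    if id, err := checkAndAdvance(100, 8, 108); err == nil {
        fmt.Println("flush ok, persisted state id is now", id)
    }
    if _, err := checkAndAdvance(100, 8, 110); err != nil {
        fmt.Println("rejected:", err)
    }
}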