From 4930614a097b70dd7bd2de0c680678bf980ed1a6 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 10 Mar 2023 20:00:23 +0100 Subject: [PATCH 01/24] params: begin v1.11.5 release cycle --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 74f27877ef..0cede57bbf 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 11 // Minor version component of the current release - VersionPatch = 4 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 11 // Minor version component of the current release + VersionPatch = 5 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. From 789de23d16045190af3ace2276ea3957190e869b Mon Sep 17 00:00:00 2001 From: Guruprasad Kamath <48196632+gurukamath@users.noreply.github.com> Date: Fri, 10 Mar 2023 21:47:05 +0100 Subject: [PATCH 02/24] tests: define `MuirGlacier` fork (#26856) add muir glacier to t8n --- tests/init.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/init.go b/tests/init.go index db037e3e1a..869f1bfcdd 100644 --- a/tests/init.go +++ b/tests/init.go @@ -90,6 +90,19 @@ var Forks = map[string]*params.ChainConfig{ PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), }, + "MuirGlacier": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + DAOForkBlock: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + }, "FrontierToHomesteadAt5": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(5), From ca61048178bd25bdb42adb026c1e1639864bcffd Mon Sep 17 00:00:00 2001 From: xiyang <90125263+JBossBC@users.noreply.github.com> Date: Mon, 13 Mar 2023 15:30:32 +0800 Subject: [PATCH 03/24] code/vm: fix comment typo (#26865) it should be constantinople rather than contantinople --- core/vm/jump_table.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 91f1be669a..a45287de80 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -98,7 +98,7 @@ func newMergeInstructionSet() JumpTable { } // newLondonInstructionSet returns the frontier, homestead, byzantium, -// contantinople, istanbul, petersburg, berlin and london instructions. +// constantinople, istanbul, petersburg, berlin and london instructions. func newLondonInstructionSet() JumpTable { instructionSet := newBerlinInstructionSet() enable3529(&instructionSet) // EIP-3529: Reduction in refunds https://eips.ethereum.org/EIPS/eip-3529 @@ -107,7 +107,7 @@ func newLondonInstructionSet() JumpTable { } // newBerlinInstructionSet returns the frontier, homestead, byzantium, -// contantinople, istanbul, petersburg and berlin instructions. +// constantinople, istanbul, petersburg and berlin instructions. 
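// As an aside, the constructors whose comments are fixed here all follow one
// layering pattern: take the previous fork's complete jump table and switch
// on the new fork's EIPs in place. A hedged sketch of that shape (the fork
// and the enable9999 helper are hypothetical; the real constructors may also
// validate the table before returning it):
func newHypotheticalForkInstructionSet() JumpTable {
	instructionSet := newBerlinInstructionSet() // start from the parent fork's set
	enable9999(&instructionSet)                 // hypothetical EIP activation, mutates in place
	return instructionSet
}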
func newBerlinInstructionSet() JumpTable { instructionSet := newIstanbulInstructionSet() enable2929(&instructionSet) // Access lists for trie accesses https://eips.ethereum.org/EIPS/eip-2929 @@ -115,7 +115,7 @@ func newBerlinInstructionSet() JumpTable { } // newIstanbulInstructionSet returns the frontier, homestead, byzantium, -// contantinople, istanbul and petersburg instructions. +// constantinople, istanbul and petersburg instructions. func newIstanbulInstructionSet() JumpTable { instructionSet := newConstantinopleInstructionSet() @@ -127,7 +127,7 @@ func newIstanbulInstructionSet() JumpTable { } // newConstantinopleInstructionSet returns the frontier, homestead, -// byzantium and contantinople instructions. +// byzantium and constantinople instructions. func newConstantinopleInstructionSet() JumpTable { instructionSet := newByzantiumInstructionSet() instructionSet[SHL] = &operation{ From a20e38720c239a8e62797ec0ff8ffbcb813b2300 Mon Sep 17 00:00:00 2001 From: s7v7nislands Date: Mon, 13 Mar 2023 16:02:50 +0800 Subject: [PATCH 04/24] core: minor code refactor (#26852) * core: refactor code * core: drop it from this anonymous goroutine func --- core/blockchain.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index f22562ccfa..1fe7d73e00 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1740,14 +1740,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) if followup, err := it.peek(); followup != nil && err == nil { throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps) - go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { + go func(start time.Time, followup *types.Block, throwaway *state.StateDB) { bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) blockPrefetchExecuteTimer.Update(time.Since(start)) - if atomic.LoadUint32(interrupt) == 1 { + if atomic.LoadUint32(&followupInterrupt) == 1 { blockPrefetchInterruptMeter.Mark(1) } - }(time.Now(), followup, throwaway, &followupInterrupt) + }(time.Now(), followup, throwaway) } } From d1c5f918a3d2e332d63725566a2ac54a616fa7e8 Mon Sep 17 00:00:00 2001 From: ucwong Date: Mon, 13 Mar 2023 02:45:25 -0600 Subject: [PATCH 05/24] core/txpool: use priceList.Put instead of heap.Push (#26863) Minor refactor to use the 'intended' accessor --- core/txpool/txpool.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index 4306d5aee6..ad556f39fb 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -17,7 +17,6 @@ package txpool import ( - "container/heap" "errors" "fmt" "math" @@ -750,7 +749,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e // Add all transactions back to the priced queue if replacesPending { for _, dropTx := range drop { - heap.Push(&pool.priced.urgent, dropTx) + pool.priced.Put(dropTx, false) } log.Trace("Discarding future transaction replacing pending tx", "hash", hash) return false, ErrFutureReplacePending From 5f81db68c6cd0544ae5f3b9b4c7b1d56d02b23da Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Mon, 13 Mar 2023 04:51:23 -0600 Subject: [PATCH 06/24] eth: return error if 'safe' or 'finalized' tag used pre-merge (#26862) Co-authored-by: Martin Holst Swende Co-authored-by: Felix Lange --- eth/api_backend.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/eth/api_backend.go 
b/eth/api_backend.go index 78b9b08ecb..643f6369df 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -75,6 +75,9 @@ func (b *EthAPIBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumb return b.eth.blockchain.CurrentBlock(), nil } if number == rpc.FinalizedBlockNumber { + if !b.eth.Merger().TDDReached() { + return nil, errors.New("'finalized' tag not supported on pre-merge network") + } block := b.eth.blockchain.CurrentFinalBlock() if block != nil { return block, nil @@ -82,6 +85,9 @@ func (b *EthAPIBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumb return nil, errors.New("finalized block not found") } if number == rpc.SafeBlockNumber { + if !b.eth.Merger().TDDReached() { + return nil, errors.New("'safe' tag not supported on pre-merge network") + } block := b.eth.blockchain.CurrentSafeBlock() if block != nil { return block, nil @@ -124,10 +130,16 @@ func (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumbe return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil } if number == rpc.FinalizedBlockNumber { + if !b.eth.Merger().TDDReached() { + return nil, errors.New("'finalized' tag not supported on pre-merge network") + } header := b.eth.blockchain.CurrentFinalBlock() return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil } if number == rpc.SafeBlockNumber { + if !b.eth.Merger().TDDReached() { + return nil, errors.New("'safe' tag not supported on pre-merge network") + } header := b.eth.blockchain.CurrentSafeBlock() return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil } From 94ff7219114540263c76d1895618041959fae1f9 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Mon, 13 Mar 2023 14:10:19 +0100 Subject: [PATCH 07/24] .travis.yml: reenable PPA build on tag push (#26873) --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index db21bd5d96..925e23b1a5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -140,7 +140,7 @@ jobs: # This builder does the Ubuntu PPA nightly uploads - stage: build - if: type = cron + if: type = cron || (type = push && tag ~= /^v[0-9]/) os: linux dist: bionic go: 1.20.x From c8a6b7100c56255a9cde580be59136beb8d28b8e Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 14 Mar 2023 16:50:53 +0800 Subject: [PATCH 08/24] core/state, trie: port changes from PBSS (#26763) --- core/state/statedb.go | 2 +- trie/committer.go | 46 ++---- trie/database.go | 9 +- trie/nodeset.go | 115 ++++++------- trie/proof.go | 2 +- trie/tracer.go | 125 ++++++++++++++ trie/tracer_test.go | 368 ++++++++++++++++++++++++++++++++++++++++++ trie/trie.go | 28 ++-- trie/trie_test.go | 91 +++++++---- trie/util_test.go | 305 ---------------------------------- trie/utils.go | 199 ----------------------- 11 files changed, 645 insertions(+), 645 deletions(-) create mode 100644 trie/tracer.go create mode 100644 trie/tracer_test.go delete mode 100644 trie/util_test.go delete mode 100644 trie/utils.go diff --git a/core/state/statedb.go b/core/state/statedb.go index 3d8fd15bbd..247aef8b23 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -970,8 +970,8 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { storageTrieNodesUpdated int storageTrieNodesDeleted int nodes = trie.NewMergedNodeSet() + codeWriter = s.db.DiskDB().NewBatch() ) - codeWriter := s.db.DiskDB().NewBatch() for addr := range s.stateObjectsDirty { if obj := s.stateObjects[addr]; !obj.deleted { // Write any contract code associated with 
the state object diff --git a/trie/committer.go b/trie/committer.go index c4957f3490..9f978873a8 100644 --- a/trie/committer.go +++ b/trie/committer.go @@ -33,29 +33,20 @@ type leaf struct { // insertion order. type committer struct { nodes *NodeSet - tracer *tracer collectLeaf bool } // newCommitter creates a new committer or picks one from the pool. -func newCommitter(owner common.Hash, tracer *tracer, collectLeaf bool) *committer { +func newCommitter(nodeset *NodeSet, collectLeaf bool) *committer { return &committer{ - nodes: NewNodeSet(owner), - tracer: tracer, + nodes: nodeset, collectLeaf: collectLeaf, } } -// Commit collapses a node down into a hash node and returns it along with -// the modified nodeset. -func (c *committer) Commit(n node) (hashNode, *NodeSet) { - h := c.commit(nil, n) - // Some nodes can be deleted from trie which can't be captured - // by committer itself. Iterate all deleted nodes tracked by - // tracer and marked them as deleted only if they are present - // in database previously. - c.tracer.markDeletions(c.nodes) - return h.(hashNode), c.nodes +// Commit collapses a node down into a hash node. +func (c *committer) Commit(n node) hashNode { + return c.commit(nil, n).(hashNode) } // commit collapses a node down into a hash node and returns it. @@ -74,9 +65,7 @@ func (c *committer) commit(path []byte, n node) node { // If the child is fullNode, recursively commit, // otherwise it can only be hashNode or valueNode. if _, ok := cn.Val.(*fullNode); ok { - childV := c.commit(append(path, cn.Key...), cn.Val) - - collapsed.Val = childV + collapsed.Val = c.commit(append(path, cn.Key...), cn.Val) } // The key needs to be copied, since we're adding it to the // modified nodeset. @@ -85,12 +74,6 @@ func (c *committer) commit(path []byte, n node) node { if hn, ok := hashedNode.(hashNode); ok { return hn } - // The short node now is embedded in its parent. Mark the node as - // deleted if it's present in database previously. It's equivalent - // as deletion from database's perspective. - if prev := c.tracer.getPrev(path); len(prev) != 0 { - c.nodes.markDeleted(path, prev) - } return collapsed case *fullNode: hashedKids := c.commitChildren(path, cn) @@ -101,12 +84,6 @@ func (c *committer) commit(path []byte, n node) node { if hn, ok := hashedNode.(hashNode); ok { return hn } - // The full node now is embedded in its parent. Mark the node as - // deleted if it's present in database previously. It's equivalent - // as deletion from database's perspective. - if prev := c.tracer.getPrev(path); len(prev) != 0 { - c.nodes.markDeleted(path, prev) - } return collapsed case hashNode: return cn @@ -134,8 +111,7 @@ func (c *committer) commitChildren(path []byte, n *fullNode) [17]node { // Commit the child recursively and store the "hashed" value. // Note the returned node can be some embedded nodes, so it's // possible the type is not hashNode. - hashed := c.commit(append(path, byte(i)), child) - children[i] = hashed + children[i] = c.commit(append(path, byte(i)), child) } // For the 17th child, it's possible the type is valuenode. if n.Children[16] != nil { @@ -155,6 +131,12 @@ func (c *committer) store(path []byte, n node) node { // usually is leaf node). But small value (less than 32bytes) is not // our target (leaves in account trie only). if hash == nil { + // The node is embedded in its parent, in other words, this node + // will not be stored in the database independently, mark it as + // deleted only if the node was existent in database before. 
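// To make the rule above concrete, here is a minimal standalone model of it
// (illustrative names, not geth's types): an embedded node, i.e. one that
// hashes to nothing, is recorded as deleted only when the access list shows
// a previously persisted version of it.
func markEmbeddedSketch(accessList map[string][]byte, deleted map[string]bool, path string, hash []byte) {
	if hash != nil {
		return // node is persisted on its own; the normal store path handles it
	}
	if _, ok := accessList[path]; ok {
		deleted[path] = true // existed on disk before, now embedded in its parent
	}
	// Otherwise the node was never persisted, so embedding it is a no-op
	// from the database's perspective.
}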
+ if _, ok := c.nodes.accessList[string(path)]; ok { + c.nodes.markDeleted(path) + } return n } // We have the hash already, estimate the RLP encoding-size of the node. @@ -169,7 +151,7 @@ func (c *committer) store(path []byte, n node) node { } ) // Collect the dirty node to nodeset for return. - c.nodes.markUpdated(path, mnode, c.tracer.getPrev(path)) + c.nodes.markUpdated(path, mnode) // Collect the corresponding leaf node if it's required. We don't check // full node since it's impossible to store value in fullNode. The key diff --git a/trie/database.go b/trie/database.go index 895ffdf89d..200ed3674b 100644 --- a/trie/database.go +++ b/trie/database.go @@ -792,13 +792,12 @@ func (db *Database) Update(nodes *MergedNodeSet) error { } for _, owner := range order { subset := nodes.sets[owner] - for _, path := range subset.updates.order { - n, ok := subset.updates.nodes[path] - if !ok { - return fmt.Errorf("missing node %x %v", owner, path) + subset.forEachWithOrder(func(path string, n *memoryNode) { + if n.isDeleted() { + return // ignore deletion } db.insert(n.hash, int(n.size), n.node) - } + }) } // Link up the account trie and storage trie if the node points // to an account trie leaf. diff --git a/trie/nodeset.go b/trie/nodeset.go index 9281723501..99e4a80fa8 100644 --- a/trie/nodeset.go +++ b/trie/nodeset.go @@ -19,6 +19,7 @@ package trie import ( "fmt" "reflect" + "sort" "strings" "github.com/ethereum/go-ethereum/common" @@ -40,8 +41,8 @@ var memoryNodeSize = int(reflect.TypeOf(memoryNode{}).Size()) // memorySize returns the total memory size used by this node. // nolint:unused -func (n *memoryNode) memorySize(key int) int { - return int(n.size) + memoryNodeSize + key +func (n *memoryNode) memorySize(pathlen int) int { + return int(n.size) + memoryNodeSize + pathlen } // rlp returns the raw rlp encoded blob of the cached trie node, either directly @@ -64,7 +65,13 @@ func (n *memoryNode) obj() node { return expandNode(n.hash[:], n.node) } +// isDeleted returns the indicator if the node is marked as deleted. +func (n *memoryNode) isDeleted() bool { + return n.hash == (common.Hash{}) +} + // nodeWithPrev wraps the memoryNode with the previous node value. +// nolint: unused type nodeWithPrev struct { *memoryNode prev []byte // RLP-encoded previous value, nil means it's non-existent @@ -79,64 +86,60 @@ func (n *nodeWithPrev) unwrap() *memoryNode { // memorySize returns the total memory size used by this node. It overloads // the function in memoryNode by counting the size of previous value as well. // nolint: unused -func (n *nodeWithPrev) memorySize(key int) int { - return n.memoryNode.memorySize(key) + len(n.prev) -} - -// nodesWithOrder represents a collection of dirty nodes which includes -// newly-inserted and updated nodes. The modification order of all nodes -// is represented by order list. -type nodesWithOrder struct { - order []string // the path list of dirty nodes, sort by insertion order - nodes map[string]*nodeWithPrev // the map of dirty nodes, keyed by node path +func (n *nodeWithPrev) memorySize(pathlen int) int { + return n.memoryNode.memorySize(pathlen) + len(n.prev) } // NodeSet contains all dirty nodes collected during the commit operation. // Each node is keyed by path. It's not thread-safe to use. 
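// The struct below leans on a sentinel convention: markDeleted (further down)
// stores a zero-valued memoryNode, and isDeleted (above) recognises the
// tombstone by its zero hash. A standalone model of just that convention,
// using local types rather than geth's:
type tombstoneNode struct{ hash [32]byte }

func (n *tombstoneNode) isDeleted() bool { return n.hash == ([32]byte{}) }

func markDeletedSketch(nodes map[string]*tombstoneNode, path string) {
	nodes[path] = &tombstoneNode{} // the zero hash acts as the deletion marker
}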
type NodeSet struct { - owner common.Hash // the identifier of the trie - updates *nodesWithOrder // the set of updated nodes(newly inserted, updated) - deletes map[string][]byte // the map of deleted nodes, keyed by node - leaves []*leaf // the list of dirty leaves + owner common.Hash // the identifier of the trie + nodes map[string]*memoryNode // the set of dirty nodes(inserted, updated, deleted) + leaves []*leaf // the list of dirty leaves + updates int // the count of updated and inserted nodes + deletes int // the count of deleted nodes + + // The list of accessed nodes, which records the original node value. + // The origin value is expected to be nil for newly inserted node + // and is expected to be non-nil for other types(updated, deleted). + accessList map[string][]byte } // NewNodeSet initializes an empty node set to be used for tracking dirty nodes // from a specific account or storage trie. The owner is zero for the account // trie and the owning account address hash for storage tries. -func NewNodeSet(owner common.Hash) *NodeSet { +func NewNodeSet(owner common.Hash, accessList map[string][]byte) *NodeSet { return &NodeSet{ - owner: owner, - updates: &nodesWithOrder{ - nodes: make(map[string]*nodeWithPrev), - }, - deletes: make(map[string][]byte), + owner: owner, + nodes: make(map[string]*memoryNode), + accessList: accessList, } } -/* -// NewNodeSetWithDeletion initializes the nodeset with provided deletion set. -func NewNodeSetWithDeletion(owner common.Hash, paths [][]byte, prev [][]byte) *NodeSet { - set := NewNodeSet(owner) - for i, path := range paths { - set.markDeleted(path, prev[i]) +// forEachWithOrder iterates the dirty nodes with the order from bottom to top, +// right to left, nodes with the longest path will be iterated first. +func (set *NodeSet) forEachWithOrder(callback func(path string, n *memoryNode)) { + var paths sort.StringSlice + for path := range set.nodes { + paths = append(paths, path) + } + // Bottom-up, longest path first + sort.Sort(sort.Reverse(paths)) + for _, path := range paths { + callback(path, set.nodes[path]) } - return set } -*/ -// markUpdated marks the node as dirty(newly-inserted or updated) with provided -// node path, node object along with its previous value. -func (set *NodeSet) markUpdated(path []byte, node *memoryNode, prev []byte) { - set.updates.order = append(set.updates.order, string(path)) - set.updates.nodes[string(path)] = &nodeWithPrev{ - memoryNode: node, - prev: prev, - } +// markUpdated marks the node as dirty(newly-inserted or updated). +func (set *NodeSet) markUpdated(path []byte, node *memoryNode) { + set.nodes[string(path)] = node + set.updates += 1 } -// markDeleted marks the node as deleted with provided path and previous value. -func (set *NodeSet) markDeleted(path []byte, prev []byte) { - set.deletes[string(path)] = prev +// markDeleted marks the node as deleted. +func (set *NodeSet) markDeleted(path []byte) { + set.nodes[string(path)] = &memoryNode{} + set.deletes += 1 } // addLeaf collects the provided leaf node into set. @@ -144,16 +147,16 @@ func (set *NodeSet) addLeaf(node *leaf) { set.leaves = append(set.leaves, node) } -// Size returns the number of updated and deleted nodes contained in the set. +// Size returns the number of dirty nodes in set. func (set *NodeSet) Size() (int, int) { - return len(set.updates.order), len(set.deletes) + return set.updates, set.deletes } // Hashes returns the hashes of all updated nodes. TODO(rjl493456442) how can // we get rid of it? 
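// The ordering guarantee in forEachWithOrder above comes from plain string
// sorting: every path is a lexicographic prefix of its descendants, so the
// reversed sort always yields children before their parent. A runnable
// illustration using only the standard library (assumes fmt and sort are
// imported):
func orderSketch() {
	nodes := map[string]string{"": "root", "01": "child", "0102": "grandchild"}
	var paths sort.StringSlice
	for path := range nodes {
		paths = append(paths, path)
	}
	sort.Sort(sort.Reverse(paths))
	for _, path := range paths {
		fmt.Printf("%q -> %s\n", path, nodes[path]) // "0102", then "01", then ""
	}
}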
func (set *NodeSet) Hashes() []common.Hash { var ret []common.Hash - for _, node := range set.updates.nodes { + for _, node := range set.nodes { ret = append(ret, node.hash) } return ret @@ -163,19 +166,23 @@ func (set *NodeSet) Hashes() []common.Hash { func (set *NodeSet) Summary() string { var out = new(strings.Builder) fmt.Fprintf(out, "nodeset owner: %v\n", set.owner) - if set.updates != nil { - for _, key := range set.updates.order { - updated := set.updates.nodes[key] - if updated.prev != nil { - fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", key, updated.hash, updated.prev) - } else { - fmt.Fprintf(out, " [+]: %x -> %v\n", key, updated.hash) + if set.nodes != nil { + for path, n := range set.nodes { + // Deletion + if n.isDeleted() { + fmt.Fprintf(out, " [-]: %x prev: %x\n", path, set.accessList[path]) + continue } + // Insertion + origin, ok := set.accessList[path] + if !ok { + fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.hash) + continue + } + // Update + fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", path, n.hash, origin) } } - for k, n := range set.deletes { - fmt.Fprintf(out, " [-]: %x -> %x\n", k, n) - } for _, n := range set.leaves { fmt.Fprintf(out, "[leaf]: %v\n", n) } diff --git a/trie/proof.go b/trie/proof.go index af49ce36b3..f11dfc47af 100644 --- a/trie/proof.go +++ b/trie/proof.go @@ -563,7 +563,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key } // Rebuild the trie with the leaf stream, the shape of trie // should be same with the original one. - tr := &Trie{root: root, reader: newEmptyReader()} + tr := &Trie{root: root, reader: newEmptyReader(), tracer: newTracer()} if empty { tr.root = nil } diff --git a/trie/tracer.go b/trie/tracer.go new file mode 100644 index 0000000000..a27e371c7a --- /dev/null +++ b/trie/tracer.go @@ -0,0 +1,125 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import "github.com/ethereum/go-ethereum/common" + +// tracer tracks the changes of trie nodes. During the trie operations, +// some nodes can be deleted from the trie, while these deleted nodes +// won't be captured by trie.Hasher or trie.Committer. Thus, these deleted +// nodes won't be removed from the disk at all. Tracer is an auxiliary tool +// used to track all insert and delete operations of trie and capture all +// deleted nodes eventually. +// +// The changed nodes can be mainly divided into two categories: the leaf +// node and intermediate node. The former is inserted/deleted by callers +// while the latter is inserted/deleted in order to follow the rule of trie. +// This tool can track all of them no matter the node is embedded in its +// parent or not, but valueNode is never tracked. +// +// Besides, it's also used for recording the original value of the nodes +// when they are resolved from the disk. 
The pre-value of the nodes will +// be used to construct trie history in the future. +// +// Note tracer is not thread-safe, callers should be responsible for handling +// the concurrency issues by themselves. +type tracer struct { + inserts map[string]struct{} + deletes map[string]struct{} + accessList map[string][]byte +} + +// newTracer initializes the tracer for capturing trie changes. +func newTracer() *tracer { + return &tracer{ + inserts: make(map[string]struct{}), + deletes: make(map[string]struct{}), + accessList: make(map[string][]byte), + } +} + +// onRead tracks the newly loaded trie node and caches the rlp-encoded +// blob internally. Don't change the value outside of function since +// it's not deep-copied. +func (t *tracer) onRead(path []byte, val []byte) { + t.accessList[string(path)] = val +} + +// onInsert tracks the newly inserted trie node. If it's already +// in the deletion set (resurrected node), then just wipe it from +// the deletion set as it's "untouched". +func (t *tracer) onInsert(path []byte) { + if _, present := t.deletes[string(path)]; present { + delete(t.deletes, string(path)) + return + } + t.inserts[string(path)] = struct{}{} +} + +// onDelete tracks the newly deleted trie node. If it's already +// in the addition set, then just wipe it from the addition set +// as it's untouched. +func (t *tracer) onDelete(path []byte) { + if _, present := t.inserts[string(path)]; present { + delete(t.inserts, string(path)) + return + } + t.deletes[string(path)] = struct{}{} +} + +// reset clears the content tracked by tracer. +func (t *tracer) reset() { + t.inserts = make(map[string]struct{}) + t.deletes = make(map[string]struct{}) + t.accessList = make(map[string][]byte) +} + +// copy returns a deep copied tracer instance. +func (t *tracer) copy() *tracer { + var ( + inserts = make(map[string]struct{}) + deletes = make(map[string]struct{}) + accessList = make(map[string][]byte) + ) + for path := range t.inserts { + inserts[path] = struct{}{} + } + for path := range t.deletes { + deletes[path] = struct{}{} + } + for path, blob := range t.accessList { + accessList[path] = common.CopyBytes(blob) + } + return &tracer{ + inserts: inserts, + deletes: deletes, + accessList: accessList, + } +} + +// markDeletions puts all tracked deletions into the provided nodeset. +func (t *tracer) markDeletions(set *NodeSet) { + for path := range t.deletes { + // It's possible a few deleted nodes were embedded + // in their parent before, the deletions can be no + // effect by deleting nothing, filter them out. + if _, ok := set.accessList[path]; !ok { + continue + } + set.markDeleted([]byte(path)) + } +} diff --git a/trie/tracer_test.go b/trie/tracer_test.go new file mode 100644 index 0000000000..5e627c89c9 --- /dev/null +++ b/trie/tracer_test.go @@ -0,0 +1,368 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "bytes" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" +) + +var ( + tiny = []struct{ k, v string }{ + {"k1", "v1"}, + {"k2", "v2"}, + {"k3", "v3"}, + } + nonAligned = []struct{ k, v string }{ + {"do", "verb"}, + {"ether", "wookiedoo"}, + {"horse", "stallion"}, + {"shaman", "horse"}, + {"doge", "coin"}, + {"dog", "puppy"}, + {"somethingveryoddindeedthis is", "myothernodedata"}, + } + standard = []struct{ k, v string }{ + {string(randBytes(32)), "verb"}, + {string(randBytes(32)), "wookiedoo"}, + {string(randBytes(32)), "stallion"}, + {string(randBytes(32)), "horse"}, + {string(randBytes(32)), "coin"}, + {string(randBytes(32)), "puppy"}, + {string(randBytes(32)), "myothernodedata"}, + } +) + +func TestTrieTracer(t *testing.T) { + testTrieTracer(t, tiny) + testTrieTracer(t, nonAligned) + testTrieTracer(t, standard) +} + +// Tests if the trie diffs are tracked correctly. Tracer should capture +// all non-leaf dirty nodes, no matter the node is embedded or not. +func testTrieTracer(t *testing.T, vals []struct{ k, v string }) { + db := NewDatabase(rawdb.NewMemoryDatabase()) + trie := NewEmpty(db) + + // Determine all new nodes are tracked + for _, val := range vals { + trie.Update([]byte(val.k), []byte(val.v)) + } + insertSet := copySet(trie.tracer.inserts) // copy before commit + deleteSet := copySet(trie.tracer.deletes) // copy before commit + root, nodes := trie.Commit(false) + db.Update(NewWithNodeSet(nodes)) + + seen := setKeys(iterNodes(db, root)) + if !compareSet(insertSet, seen) { + t.Fatal("Unexpected insertion set") + } + if !compareSet(deleteSet, nil) { + t.Fatal("Unexpected deletion set") + } + + // Determine all deletions are tracked + trie, _ = New(TrieID(root), db) + for _, val := range vals { + trie.Delete([]byte(val.k)) + } + insertSet, deleteSet = copySet(trie.tracer.inserts), copySet(trie.tracer.deletes) + if !compareSet(insertSet, nil) { + t.Fatal("Unexpected insertion set") + } + if !compareSet(deleteSet, seen) { + t.Fatal("Unexpected deletion set") + } +} + +// Test that after inserting a new batch of nodes and deleting them immediately, +// the trie tracer should be cleared normally as no operation happened. +func TestTrieTracerNoop(t *testing.T) { + testTrieTracerNoop(t, tiny) + testTrieTracerNoop(t, nonAligned) + testTrieTracerNoop(t, standard) +} + +func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) { + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + for _, val := range vals { + trie.Update([]byte(val.k), []byte(val.v)) + } + for _, val := range vals { + trie.Delete([]byte(val.k)) + } + if len(trie.tracer.inserts) != 0 { + t.Fatal("Unexpected insertion set") + } + if len(trie.tracer.deletes) != 0 { + t.Fatal("Unexpected deletion set") + } +} + +// Tests if the accessList is correctly tracked. 
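// The no-op behaviour checked by testTrieTracerNoop above falls out of the
// cancellation rule in tracer.onInsert and tracer.onDelete: each operation
// first tries to cancel its opposite. A self-contained model of just that
// rule:
type tracerSketch struct{ inserts, deletes map[string]struct{} }

func (t *tracerSketch) onInsert(path string) {
	if _, ok := t.deletes[path]; ok {
		delete(t.deletes, path) // resurrected node: net effect is "untouched"
		return
	}
	t.inserts[path] = struct{}{}
}

func (t *tracerSketch) onDelete(path string) {
	if _, ok := t.inserts[path]; ok {
		delete(t.inserts, path) // inserted and then removed again: forget it
		return
	}
	t.deletes[path] = struct{}{}
}

// Inserting and then deleting the same key therefore leaves both sets empty,
// which is exactly what the test asserts.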
+func TestAccessList(t *testing.T) { + testAccessList(t, tiny) + testAccessList(t, nonAligned) + testAccessList(t, standard) +} + +func testAccessList(t *testing.T, vals []struct{ k, v string }) { + var ( + db = NewDatabase(rawdb.NewMemoryDatabase()) + trie = NewEmpty(db) + orig = trie.Copy() + ) + // Create trie from scratch + for _, val := range vals { + trie.Update([]byte(val.k), []byte(val.v)) + } + root, nodes := trie.Commit(false) + db.Update(NewWithNodeSet(nodes)) + + trie, _ = New(TrieID(root), db) + if err := verifyAccessList(orig, trie, nodes); err != nil { + t.Fatalf("Invalid accessList %v", err) + } + + // Update trie + trie, _ = New(TrieID(root), db) + orig = trie.Copy() + for _, val := range vals { + trie.Update([]byte(val.k), randBytes(32)) + } + root, nodes = trie.Commit(false) + db.Update(NewWithNodeSet(nodes)) + + trie, _ = New(TrieID(root), db) + if err := verifyAccessList(orig, trie, nodes); err != nil { + t.Fatalf("Invalid accessList %v", err) + } + + // Add more new nodes + trie, _ = New(TrieID(root), db) + orig = trie.Copy() + var keys []string + for i := 0; i < 30; i++ { + key := randBytes(32) + keys = append(keys, string(key)) + trie.Update(key, randBytes(32)) + } + root, nodes = trie.Commit(false) + db.Update(NewWithNodeSet(nodes)) + + trie, _ = New(TrieID(root), db) + if err := verifyAccessList(orig, trie, nodes); err != nil { + t.Fatalf("Invalid accessList %v", err) + } + + // Partial deletions + trie, _ = New(TrieID(root), db) + orig = trie.Copy() + for _, key := range keys { + trie.Update([]byte(key), nil) + } + root, nodes = trie.Commit(false) + db.Update(NewWithNodeSet(nodes)) + + trie, _ = New(TrieID(root), db) + if err := verifyAccessList(orig, trie, nodes); err != nil { + t.Fatalf("Invalid accessList %v", err) + } + + // Delete all + trie, _ = New(TrieID(root), db) + orig = trie.Copy() + for _, val := range vals { + trie.Update([]byte(val.k), nil) + } + root, nodes = trie.Commit(false) + db.Update(NewWithNodeSet(nodes)) + + trie, _ = New(TrieID(root), db) + if err := verifyAccessList(orig, trie, nodes); err != nil { + t.Fatalf("Invalid accessList %v", err) + } +} + +// Tests origin values won't be tracked in Iterator or Prover +func TestAccessListLeak(t *testing.T) { + var ( + db = NewDatabase(rawdb.NewMemoryDatabase()) + trie = NewEmpty(db) + ) + // Create trie from scratch + for _, val := range standard { + trie.Update([]byte(val.k), []byte(val.v)) + } + root, nodes := trie.Commit(false) + db.Update(NewWithNodeSet(nodes)) + + var cases = []struct { + op func(tr *Trie) + }{ + { + func(tr *Trie) { + it := tr.NodeIterator(nil) + for it.Next(true) { + } + }, + }, + { + func(tr *Trie) { + it := NewIterator(tr.NodeIterator(nil)) + for it.Next() { + } + }, + }, + { + func(tr *Trie) { + for _, val := range standard { + tr.Prove([]byte(val.k), 0, rawdb.NewMemoryDatabase()) + } + }, + }, + } + for _, c := range cases { + trie, _ = New(TrieID(root), db) + n1 := len(trie.tracer.accessList) + c.op(trie) + n2 := len(trie.tracer.accessList) + + if n1 != n2 { + t.Fatalf("AccessList is leaked, prev %d after %d", n1, n2) + } + } +} + +// Tests whether the original tree node is correctly deleted after being embedded +// in its parent due to the smaller size of the original tree node. 
+func TestTinyTree(t *testing.T) { + var ( + db = NewDatabase(rawdb.NewMemoryDatabase()) + trie = NewEmpty(db) + ) + for _, val := range tiny { + trie.Update([]byte(val.k), randBytes(32)) + } + root, set := trie.Commit(false) + db.Update(NewWithNodeSet(set)) + + trie, _ = New(TrieID(root), db) + orig := trie.Copy() + for _, val := range tiny { + trie.Update([]byte(val.k), []byte(val.v)) + } + root, set = trie.Commit(false) + db.Update(NewWithNodeSet(set)) + + trie, _ = New(TrieID(root), db) + if err := verifyAccessList(orig, trie, set); err != nil { + t.Fatalf("Invalid accessList %v", err) + } +} + +func compareSet(setA, setB map[string]struct{}) bool { + if len(setA) != len(setB) { + return false + } + for key := range setA { + if _, ok := setB[key]; !ok { + return false + } + } + return true +} + +func forNodes(tr *Trie) map[string][]byte { + var ( + it = tr.NodeIterator(nil) + nodes = make(map[string][]byte) + ) + for it.Next(true) { + if it.Leaf() { + continue + } + nodes[string(it.Path())] = common.CopyBytes(it.NodeBlob()) + } + return nodes +} + +func iterNodes(db *Database, root common.Hash) map[string][]byte { + tr, _ := New(TrieID(root), db) + return forNodes(tr) +} + +func forHashedNodes(tr *Trie) map[string][]byte { + var ( + it = tr.NodeIterator(nil) + nodes = make(map[string][]byte) + ) + for it.Next(true) { + if it.Hash() == (common.Hash{}) { + continue + } + nodes[string(it.Path())] = common.CopyBytes(it.NodeBlob()) + } + return nodes +} + +func diffTries(trieA, trieB *Trie) (map[string][]byte, map[string][]byte, map[string][]byte) { + var ( + nodesA = forHashedNodes(trieA) + nodesB = forHashedNodes(trieB) + inA = make(map[string][]byte) // hashed nodes in trie a but not b + inB = make(map[string][]byte) // hashed nodes in trie b but not a + both = make(map[string][]byte) // hashed nodes in both tries but different value + ) + for path, blobA := range nodesA { + if blobB, ok := nodesB[path]; ok { + if bytes.Equal(blobA, blobB) { + continue + } + both[path] = blobA + continue + } + inA[path] = blobA + } + for path, blobB := range nodesB { + if _, ok := nodesA[path]; ok { + continue + } + inB[path] = blobB + } + return inA, inB, both +} + +func setKeys(set map[string][]byte) map[string]struct{} { + keys := make(map[string]struct{}) + for k := range set { + keys[k] = struct{}{} + } + return keys +} + +func copySet(set map[string]struct{}) map[string]struct{} { + copied := make(map[string]struct{}) + for k := range set { + copied[k] = struct{}{} + } + return copied +} diff --git a/trie/trie.go b/trie/trie.go index cf9108f107..17bacba00f 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -81,7 +81,7 @@ func New(id *ID, db NodeReader) (*Trie, error) { trie := &Trie{ owner: id.Owner, reader: reader, - //tracer: newTracer(), + tracer: newTracer(), } if id.Root != (common.Hash{}) && id.Root != types.EmptyRootHash { rootnode, err := trie.resolveAndTrack(id.Root[:], nil) @@ -547,7 +547,7 @@ func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) { // Hash returns the root hash of the trie. It does not write to the // database and can be used even if the trie doesn't have one. 
func (t *Trie) Hash() common.Hash { - hash, cached, _ := t.hashRoot() + hash, cached := t.hashRoot() t.root = cached return common.BytesToHash(hash.(hashNode)) } @@ -561,14 +561,14 @@ func (t *Trie) Hash() common.Hash { func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet) { defer t.tracer.reset() + nodes := NewNodeSet(t.owner, t.tracer.accessList) + t.tracer.markDeletions(nodes) + // Trie is empty and can be classified into two types of situations: // - The trie was empty and no update happens // - The trie was non-empty and all nodes are dropped if t.root == nil { - // Wrap tracked deletions as the return - set := NewNodeSet(t.owner) - t.tracer.markDeletions(set) - return types.EmptyRootHash, set + return types.EmptyRootHash, nodes } // Derive the hash for all dirty nodes first. We hold the assumption // in the following procedure that all nodes are hashed. @@ -582,23 +582,23 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet) { t.root = hashedNode return rootHash, nil } - h := newCommitter(t.owner, t.tracer, collectLeaf) - newRoot, nodes := h.Commit(t.root) - t.root = newRoot + t.root = newCommitter(nodes, collectLeaf).Commit(t.root) return rootHash, nodes } // hashRoot calculates the root hash of the given trie -func (t *Trie) hashRoot() (node, node, error) { +func (t *Trie) hashRoot() (node, node) { if t.root == nil { - return hashNode(types.EmptyRootHash.Bytes()), nil, nil + return hashNode(types.EmptyRootHash.Bytes()), nil } // If the number of changes is below 100, we let one thread handle it h := newHasher(t.unhashed >= 100) - defer returnHasherToPool(h) + defer func() { + returnHasherToPool(h) + t.unhashed = 0 + }() hashed, cached := h.hash(t.root, true) - t.unhashed = 0 - return hashed, cached, nil + return hashed, cached } // Reset drops the referenced root node and cleans all internal state. 
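// With the reworked Commit in place, the caller-side flow exercised all
// through the new tests looks like this. A hedged sketch built only from
// identifiers that appear in this patch (assumes the trie and core/rawdb
// packages are imported):
func commitFlowSketch() {
	db := NewDatabase(rawdb.NewMemoryDatabase())
	tr := NewEmpty(db)
	tr.Update([]byte("key"), []byte("value"))

	// Commit returns the new root plus the dirty-node set; deletions tracked
	// by the tracer are already folded into the set before it is returned.
	root, nodes := tr.Commit(false)
	if nodes != nil {
		db.Update(NewWithNodeSet(nodes)) // flush dirty nodes to the database
	}
	// Reopen the trie at the committed root.
	tr, _ = New(TrieID(root), db)
	_ = tr
}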
diff --git a/trie/trie_test.go b/trie/trie_test.go index 2f56c89cde..e415937073 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -403,6 +403,51 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value { return reflect.ValueOf(steps) } +func verifyAccessList(old *Trie, new *Trie, set *NodeSet) error { + deletes, inserts, updates := diffTries(old, new) + + // Check insertion set + for path := range inserts { + n, ok := set.nodes[path] + if !ok || n.isDeleted() { + return errors.New("expect new node") + } + _, ok = set.accessList[path] + if ok { + return errors.New("unexpected origin value") + } + } + // Check deletion set + for path, blob := range deletes { + n, ok := set.nodes[path] + if !ok || !n.isDeleted() { + return errors.New("expect deleted node") + } + v, ok := set.accessList[path] + if !ok { + return errors.New("expect origin value") + } + if !bytes.Equal(v, blob) { + return errors.New("invalid origin value") + } + } + // Check update set + for path, blob := range updates { + n, ok := set.nodes[path] + if !ok || n.isDeleted() { + return errors.New("expect updated node") + } + v, ok := set.accessList[path] + if !ok { + return errors.New("expect origin value") + } + if !bytes.Equal(v, blob) { + return errors.New("invalid origin value") + } + } + return nil +} + func runRandTest(rt randTest) bool { var ( triedb = NewDatabase(rawdb.NewMemoryDatabase()) @@ -410,8 +455,6 @@ func runRandTest(rt randTest) bool { values = make(map[string]string) // tracks content of the trie origTrie = NewEmpty(triedb) ) - tr.tracer = newTracer() - for i, step := range rt { // fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n", // step.op, step.key, step.value, i) @@ -447,24 +490,6 @@ func runRandTest(rt randTest) bool { tr.Hash() case opCommit: root, nodes := tr.Commit(true) - // Validity the returned nodeset - if nodes != nil { - for path, node := range nodes.updates.nodes { - blob, _, _ := origTrie.TryGetNode(hexToCompact([]byte(path))) - got := node.prev - if !bytes.Equal(blob, got) { - rt[i].err = fmt.Errorf("prevalue mismatch for 0x%x, got 0x%x want 0x%x", path, got, blob) - panic(rt[i].err) - } - } - for path, prev := range nodes.deletes { - blob, _, _ := origTrie.TryGetNode(hexToCompact([]byte(path))) - if !bytes.Equal(blob, prev) { - rt[i].err = fmt.Errorf("prevalue mismatch for 0x%x, got 0x%x want 0x%x", path, prev, blob) - return false - } - } - } if nodes != nil { triedb.Update(NewWithNodeSet(nodes)) } @@ -473,13 +498,13 @@ func runRandTest(rt randTest) bool { rt[i].err = err return false } + if nodes != nil { + if err := verifyAccessList(origTrie, newtr, nodes); err != nil { + rt[i].err = err + return false + } + } tr = newtr - - // Enable node tracing. Resolve the root node again explicitly - // since it's not captured at the beginning. 
- tr.tracer = newTracer() - tr.resolveAndTrack(root.Bytes(), nil) - origTrie = tr.Copy() case opItercheckhash: checktr := NewEmpty(triedb) @@ -492,8 +517,6 @@ func runRandTest(rt randTest) bool { } case opNodeDiff: var ( - inserted = tr.tracer.insertList() - deleted = tr.tracer.deleteList() origIter = origTrie.NodeIterator(nil) curIter = tr.NodeIterator(nil) origSeen = make(map[string]struct{}) @@ -527,19 +550,19 @@ func runRandTest(rt randTest) bool { deleteExp[path] = struct{}{} } } - if len(insertExp) != len(inserted) { + if len(insertExp) != len(tr.tracer.inserts) { rt[i].err = fmt.Errorf("insert set mismatch") } - if len(deleteExp) != len(deleted) { + if len(deleteExp) != len(tr.tracer.deletes) { rt[i].err = fmt.Errorf("delete set mismatch") } - for _, insert := range inserted { - if _, present := insertExp[string(insert)]; !present { + for insert := range tr.tracer.inserts { + if _, present := insertExp[insert]; !present { rt[i].err = fmt.Errorf("missing inserted node") } } - for _, del := range deleted { - if _, present := deleteExp[string(del)]; !present { + for del := range tr.tracer.deletes { + if _, present := deleteExp[del]; !present { rt[i].err = fmt.Errorf("missing deleted node") } } diff --git a/trie/util_test.go b/trie/util_test.go deleted file mode 100644 index 8d925a16aa..0000000000 --- a/trie/util_test.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "bytes" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" -) - -// Tests if the trie diffs are tracked correctly. 
-func TestTrieTracer(t *testing.T) { - db := NewDatabase(rawdb.NewMemoryDatabase()) - trie := NewEmpty(db) - trie.tracer = newTracer() - - // Insert a batch of entries, all the nodes should be marked as inserted - vals := []struct{ k, v string }{ - {"do", "verb"}, - {"ether", "wookiedoo"}, - {"horse", "stallion"}, - {"shaman", "horse"}, - {"doge", "coin"}, - {"dog", "puppy"}, - {"somethingveryoddindeedthis is", "myothernodedata"}, - } - for _, val := range vals { - trie.Update([]byte(val.k), []byte(val.v)) - } - trie.Hash() - - seen := make(map[string]struct{}) - it := trie.NodeIterator(nil) - for it.Next(true) { - if it.Leaf() { - continue - } - seen[string(it.Path())] = struct{}{} - } - inserted := trie.tracer.insertList() - if len(inserted) != len(seen) { - t.Fatalf("Unexpected inserted node tracked want %d got %d", len(seen), len(inserted)) - } - for _, k := range inserted { - _, ok := seen[string(k)] - if !ok { - t.Fatalf("Unexpected inserted node") - } - } - deleted := trie.tracer.deleteList() - if len(deleted) != 0 { - t.Fatalf("Unexpected deleted node tracked %d", len(deleted)) - } - - // Commit the changes and re-create with new root - root, nodes := trie.Commit(false) - if err := db.Update(NewWithNodeSet(nodes)); err != nil { - t.Fatal(err) - } - trie, _ = New(TrieID(root), db) - trie.tracer = newTracer() - - // Delete all the elements, check deletion set - for _, val := range vals { - trie.Delete([]byte(val.k)) - } - trie.Hash() - - inserted = trie.tracer.insertList() - if len(inserted) != 0 { - t.Fatalf("Unexpected inserted node tracked %d", len(inserted)) - } - deleted = trie.tracer.deleteList() - if len(deleted) != len(seen) { - t.Fatalf("Unexpected deleted node tracked want %d got %d", len(seen), len(deleted)) - } - for _, k := range deleted { - _, ok := seen[string(k)] - if !ok { - t.Fatalf("Unexpected inserted node") - } - } -} - -func TestTrieTracerNoop(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - trie.tracer = newTracer() - - // Insert a batch of entries, all the nodes should be marked as inserted - vals := []struct{ k, v string }{ - {"do", "verb"}, - {"ether", "wookiedoo"}, - {"horse", "stallion"}, - {"shaman", "horse"}, - {"doge", "coin"}, - {"dog", "puppy"}, - {"somethingveryoddindeedthis is", "myothernodedata"}, - } - for _, val := range vals { - trie.Update([]byte(val.k), []byte(val.v)) - } - for _, val := range vals { - trie.Delete([]byte(val.k)) - } - if len(trie.tracer.insertList()) != 0 { - t.Fatalf("Unexpected inserted node tracked %d", len(trie.tracer.insertList())) - } - if len(trie.tracer.deleteList()) != 0 { - t.Fatalf("Unexpected deleted node tracked %d", len(trie.tracer.deleteList())) - } -} - -func TestTrieTracePrevValue(t *testing.T) { - db := NewDatabase(rawdb.NewMemoryDatabase()) - trie := NewEmpty(db) - trie.tracer = newTracer() - - paths, blobs := trie.tracer.prevList() - if len(paths) != 0 || len(blobs) != 0 { - t.Fatalf("Nothing should be tracked") - } - // Insert a batch of entries, all the nodes should be marked as inserted - vals := []struct{ k, v string }{ - {"do", "verb"}, - {"ether", "wookiedoo"}, - {"horse", "stallion"}, - {"shaman", "horse"}, - {"doge", "coin"}, - {"dog", "puppy"}, - {"somethingveryoddindeedthis is", "myothernodedata"}, - } - for _, val := range vals { - trie.Update([]byte(val.k), []byte(val.v)) - } - paths, blobs = trie.tracer.prevList() - if len(paths) != 0 || len(blobs) != 0 { - t.Fatalf("Nothing should be tracked") - } - - // Commit the changes and re-create with new root - root, nodes 
:= trie.Commit(false) - if err := db.Update(NewWithNodeSet(nodes)); err != nil { - t.Fatal(err) - } - trie, _ = New(TrieID(root), db) - trie.tracer = newTracer() - trie.resolveAndTrack(root.Bytes(), nil) - - // Load all nodes in trie - for _, val := range vals { - trie.TryGet([]byte(val.k)) - } - - // Ensure all nodes are tracked by tracer with correct prev-values - iter := trie.NodeIterator(nil) - seen := make(map[string][]byte) - for iter.Next(true) { - // Embedded nodes are ignored since they are not present in - // database. - if iter.Hash() == (common.Hash{}) { - continue - } - seen[string(iter.Path())] = common.CopyBytes(iter.NodeBlob()) - } - - paths, blobs = trie.tracer.prevList() - if len(paths) != len(seen) || len(blobs) != len(seen) { - t.Fatalf("Unexpected tracked values") - } - for i, path := range paths { - blob := blobs[i] - prev, ok := seen[string(path)] - if !ok { - t.Fatalf("Missing node %v", path) - } - if !bytes.Equal(blob, prev) { - t.Fatalf("Unexpected value path: %v, want: %v, got: %v", path, prev, blob) - } - } - - // Re-open the trie and iterate the trie, ensure nothing will be tracked. - // Iterator will not link any loaded nodes to trie. - trie, _ = New(TrieID(root), db) - trie.tracer = newTracer() - - iter = trie.NodeIterator(nil) - for iter.Next(true) { - } - paths, blobs = trie.tracer.prevList() - if len(paths) != 0 || len(blobs) != 0 { - t.Fatalf("Nothing should be tracked") - } - - // Re-open the trie and generate proof for entries, ensure nothing will - // be tracked. Prover will not link any loaded nodes to trie. - trie, _ = New(TrieID(root), db) - trie.tracer = newTracer() - for _, val := range vals { - trie.Prove([]byte(val.k), 0, rawdb.NewMemoryDatabase()) - } - paths, blobs = trie.tracer.prevList() - if len(paths) != 0 || len(blobs) != 0 { - t.Fatalf("Nothing should be tracked") - } - - // Delete entries from trie, ensure all previous values are correct. 
- trie, _ = New(TrieID(root), db) - trie.tracer = newTracer() - trie.resolveAndTrack(root.Bytes(), nil) - - for _, val := range vals { - trie.TryDelete([]byte(val.k)) - } - paths, blobs = trie.tracer.prevList() - if len(paths) != len(seen) || len(blobs) != len(seen) { - t.Fatalf("Unexpected tracked values") - } - for i, path := range paths { - blob := blobs[i] - prev, ok := seen[string(path)] - if !ok { - t.Fatalf("Missing node %v", path) - } - if !bytes.Equal(blob, prev) { - t.Fatalf("Unexpected value path: %v, want: %v, got: %v", path, prev, blob) - } - } -} - -func TestDeleteAll(t *testing.T) { - db := NewDatabase(rawdb.NewMemoryDatabase()) - trie := NewEmpty(db) - trie.tracer = newTracer() - - // Insert a batch of entries, all the nodes should be marked as inserted - vals := []struct{ k, v string }{ - {"do", "verb"}, - {"ether", "wookiedoo"}, - {"horse", "stallion"}, - {"shaman", "horse"}, - {"doge", "coin"}, - {"dog", "puppy"}, - {"somethingveryoddindeedthis is", "myothernodedata"}, - } - for _, val := range vals { - trie.Update([]byte(val.k), []byte(val.v)) - } - root, set := trie.Commit(false) - if err := db.Update(NewWithNodeSet(set)); err != nil { - t.Fatal(err) - } - // Delete entries from trie, ensure all values are detected - trie, _ = New(TrieID(root), db) - trie.tracer = newTracer() - trie.resolveAndTrack(root.Bytes(), nil) - - // Iterate all existent nodes - var ( - it = trie.NodeIterator(nil) - nodes = make(map[string][]byte) - ) - for it.Next(true) { - if it.Hash() != (common.Hash{}) { - nodes[string(it.Path())] = common.CopyBytes(it.NodeBlob()) - } - } - - // Perform deletion to purge the entire trie - for _, val := range vals { - trie.Delete([]byte(val.k)) - } - root, set = trie.Commit(false) - if root != types.EmptyRootHash { - t.Fatalf("Invalid trie root %v", root) - } - for path, blob := range set.deletes { - prev, ok := nodes[path] - if !ok { - t.Fatalf("Extra node deleted %v", []byte(path)) - } - if !bytes.Equal(prev, blob) { - t.Fatalf("Unexpected previous value %v", []byte(path)) - } - } - if len(set.deletes) != len(nodes) { - t.Fatalf("Unexpected deletion set") - } -} diff --git a/trie/utils.go b/trie/utils.go deleted file mode 100644 index 5dce65cd29..0000000000 --- a/trie/utils.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -// tracer tracks the changes of trie nodes. During the trie operations, -// some nodes can be deleted from the trie, while these deleted nodes -// won't be captured by trie.Hasher or trie.Committer. Thus, these deleted -// nodes won't be removed from the disk at all. Tracer is an auxiliary tool -// used to track all insert and delete operations of trie and capture all -// deleted nodes eventually. 
-// -// The changed nodes can be mainly divided into two categories: the leaf -// node and intermediate node. The former is inserted/deleted by callers -// while the latter is inserted/deleted in order to follow the rule of trie. -// This tool can track all of them no matter the node is embedded in its -// parent or not, but valueNode is never tracked. -// -// Besides, it's also used for recording the original value of the nodes -// when they are resolved from the disk. The pre-value of the nodes will -// be used to construct reverse-diffs in the future. -// -// Note tracer is not thread-safe, callers should be responsible for handling -// the concurrency issues by themselves. -type tracer struct { - insert map[string]struct{} - delete map[string]struct{} - origin map[string][]byte -} - -// newTracer initializes the tracer for capturing trie changes. -func newTracer() *tracer { - return &tracer{ - insert: make(map[string]struct{}), - delete: make(map[string]struct{}), - origin: make(map[string][]byte), - } -} - -// onRead tracks the newly loaded trie node and caches the rlp-encoded blob internally. -// Don't change the value outside of function since it's not deep-copied. -func (t *tracer) onRead(path []byte, val []byte) { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - t.origin[string(path)] = val -} - -// onInsert tracks the newly inserted trie node. If it's already in the deletion set -// (resurrected node), then just wipe it from the deletion set as the "untouched". -func (t *tracer) onInsert(path []byte) { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - if _, present := t.delete[string(path)]; present { - delete(t.delete, string(path)) - return - } - t.insert[string(path)] = struct{}{} -} - -// onDelete tracks the newly deleted trie node. If it's already -// in the addition set, then just wipe it from the addition set -// as it's untouched. -func (t *tracer) onDelete(path []byte) { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - if _, present := t.insert[string(path)]; present { - delete(t.insert, string(path)) - return - } - t.delete[string(path)] = struct{}{} -} - -// insertList returns the tracked inserted trie nodes in list format. -func (t *tracer) insertList() [][]byte { - // Tracer isn't used right now, remove this check later. - if t == nil { - return nil - } - var ret [][]byte - for path := range t.insert { - ret = append(ret, []byte(path)) - } - return ret -} - -// deleteList returns the tracked deleted trie nodes in list format. -func (t *tracer) deleteList() [][]byte { - // Tracer isn't used right now, remove this check later. - if t == nil { - return nil - } - var ret [][]byte - for path := range t.delete { - ret = append(ret, []byte(path)) - } - return ret -} - -// prevList returns the tracked node blobs in list format. -func (t *tracer) prevList() ([][]byte, [][]byte) { - // Tracer isn't used right now, remove this check later. - if t == nil { - return nil, nil - } - var ( - paths [][]byte - blobs [][]byte - ) - for path, blob := range t.origin { - paths = append(paths, []byte(path)) - blobs = append(blobs, blob) - } - return paths, blobs -} - -// getPrev returns the cached original value of the specified node. -func (t *tracer) getPrev(path []byte) []byte { - // Tracer isn't used right now, remove this check later. - if t == nil { - return nil - } - return t.origin[string(path)] -} - -// reset clears the content tracked by tracer. 
-func (t *tracer) reset() { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - t.insert = make(map[string]struct{}) - t.delete = make(map[string]struct{}) - t.origin = make(map[string][]byte) -} - -// copy returns a deep copied tracer instance. -func (t *tracer) copy() *tracer { - // Tracer isn't used right now, remove this check later. - if t == nil { - return nil - } - var ( - insert = make(map[string]struct{}) - delete = make(map[string]struct{}) - origin = make(map[string][]byte) - ) - for key := range t.insert { - insert[key] = struct{}{} - } - for key := range t.delete { - delete[key] = struct{}{} - } - for key, val := range t.origin { - origin[key] = val - } - return &tracer{ - insert: insert, - delete: delete, - origin: origin, - } -} - -// markDeletions puts all tracked deletions into the provided nodeset. -func (t *tracer) markDeletions(set *NodeSet) { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - for _, path := range t.deleteList() { - // There are a few possibilities for this scenario(the node is deleted - // but not present in database previously), for example the node was - // embedded in the parent and now deleted from the trie. In this case - // it's noop from database's perspective. - val := t.getPrev(path) - if len(val) == 0 { - continue - } - set.markDeleted(path, val) - } -} From eca3d39c314c0eaa2b2b616619fde84d032a11b1 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Tue, 14 Mar 2023 07:40:40 -0400 Subject: [PATCH 09/24] p2p/discover: pass invalid discv5 packets to Unhandled channel (#26699) This makes it possible to run another protocol alongside discv5, by reading unhandled packets from the channel. --- p2p/discover/v5_udp.go | 10 ++++++++++ p2p/discover/v5wire/encoding.go | 18 ++++++++++++++++-- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/p2p/discover/v5_udp.go b/p2p/discover/v5_udp.go index 53a1c6f767..38f5b3b652 100644 --- a/p2p/discover/v5_udp.go +++ b/p2p/discover/v5_udp.go @@ -83,6 +83,7 @@ type UDPv5 struct { callCh chan *callV5 callDoneCh chan *callV5 respTimeoutCh chan *callTimeout + unhandled chan<- ReadPacket // state of dispatch codec codecV5 @@ -156,6 +157,7 @@ func newUDPv5(conn UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv5, error) { callCh: make(chan *callV5), callDoneCh: make(chan *callV5), respTimeoutCh: make(chan *callTimeout), + unhandled: cfg.Unhandled, // state of dispatch codec: v5wire.NewCodec(ln, cfg.PrivateKey, cfg.Clock, cfg.V5ProtocolID), activeCallByNode: make(map[enode.ID]*callV5), @@ -657,6 +659,14 @@ func (t *UDPv5) handlePacket(rawpacket []byte, fromAddr *net.UDPAddr) error { addr := fromAddr.String() fromID, fromNode, packet, err := t.codec.Decode(rawpacket, addr) if err != nil { + if t.unhandled != nil && v5wire.IsInvalidHeader(err) { + // The packet seems unrelated to discv5, send it to the next protocol. + // t.log.Trace("Unhandled discv5 packet", "id", fromID, "addr", addr, "err", err) + up := ReadPacket{Data: make([]byte, len(rawpacket)), Addr: fromAddr} + copy(up.Data, rawpacket) + t.unhandled <- up + return nil + } t.log.Debug("Bad discv5 packet", "id", fromID, "addr", addr, "err", err) return err } diff --git a/p2p/discover/v5wire/encoding.go b/p2p/discover/v5wire/encoding.go index d979ab0f9c..5108910620 100644 --- a/p2p/discover/v5wire/encoding.go +++ b/p2p/discover/v5wire/encoding.go @@ -94,6 +94,8 @@ const ( // Should reject packets smaller than minPacketSize. 
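// Consumption side of the new Unhandled hook added above: a secondary
// protocol drains the packets whose headers failed discv5 validation. A
// hedged sketch; Config.Unhandled and ReadPacket come from this patch, while
// the key, the handler and the surrounding ListenV5 setup are assumed
// (handleOtherProtocol is hypothetical):
func listenAlongsideSketch(key *ecdsa.PrivateKey) discover.Config {
	unhandled := make(chan discover.ReadPacket, 32)
	cfg := discover.Config{
		PrivateKey: key,
		Unhandled:  unhandled, // packets failing discv5 header checks land here
	}
	go func() {
		for pkt := range unhandled {
			handleOtherProtocol(pkt.Addr, pkt.Data) // hypothetical handler
		}
	}()
	return cfg // pass to discover.ListenV5 with the UDP conn and local node
}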
minPacketSize = 63 + maxPacketSize = 1280 + minMessageSize = 48 // this refers to data after static headers randomPacketMsgSize = 20 ) @@ -122,6 +124,13 @@ var ( ErrInvalidReqID = errors.New("request ID larger than 8 bytes") ) +// IsInvalidHeader reports whether 'err' is related to an invalid packet header. When it +// returns false, it is pretty certain that the packet causing the error does not belong +// to discv5. +func IsInvalidHeader(err error) bool { + return err == errTooShort || err == errInvalidHeader || err == errMsgTooShort +} + // Packet sizes. var ( sizeofStaticHeader = binary.Size(StaticHeader{}) @@ -147,6 +156,7 @@ type Codec struct { msgctbuf []byte // message data ciphertext // decoder buffer + decbuf []byte reader bytes.Reader } @@ -158,6 +168,7 @@ func NewCodec(ln *enode.LocalNode, key *ecdsa.PrivateKey, clock mclock.Clock, pr privkey: key, sc: NewSessionCache(1024, clock), protocolID: DefaultProtocolID, + decbuf: make([]byte, maxPacketSize), } if protocolID != nil { c.protocolID = *protocolID @@ -424,10 +435,13 @@ func (c *Codec) encryptMessage(s *session, p Packet, head *Header, headerData [] } // Decode decodes a discovery packet. -func (c *Codec) Decode(input []byte, addr string) (src enode.ID, n *enode.Node, p Packet, err error) { - if len(input) < minPacketSize { +func (c *Codec) Decode(inputData []byte, addr string) (src enode.ID, n *enode.Node, p Packet, err error) { + if len(inputData) < minPacketSize { return enode.ID{}, nil, nil, errTooShort } + // Copy the packet to a tmp buffer to avoid modifying it. + c.decbuf = append(c.decbuf[:0], inputData...) + input := c.decbuf // Unmask the static header. var head Header copy(head.IV[:], input[:sizeofMaskingIV]) From b5c9be3358f0eb0a6f217474678dfd609f1d2743 Mon Sep 17 00:00:00 2001 From: Stephen Flynn Date: Tue, 14 Mar 2023 10:23:49 -0400 Subject: [PATCH 10/24] all: update links in documentation (#26882) Co-authored-by: Stephen Flynn --- .github/CONTRIBUTING.md | 2 +- README.md | 12 ++++++------ cmd/checkpoint-admin/README.md | 2 +- cmd/geth/consolecmd.go | 6 +++--- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index a08542df25..969b7f8f9f 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -35,6 +35,6 @@ and help. ## Configuration, dependencies, and tests -Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide) +Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/geth-developer/dev-guide) for more details on configuring your environment, managing project dependencies and testing procedures. diff --git a/README.md b/README.md index 325f796a3e..34bd8b94c9 100644 --- a/README.md +++ b/README.md @@ -36,10 +36,10 @@ directory. | Command | Description | | :--------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **`geth`** | Our main Ethereum CLI client. 
It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. | +| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/fundamentals/command-line-options) for command line options. | | `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. | | `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. | -| `abigen` | Source code generator to convert Ethereum contract definitions into easy-to-use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. | +| `abigen` | Source code generator to convert Ethereum contract definitions into easy-to-use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/developers/dapp-developer/native-bindings) page for details. | | `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | | `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). | | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | @@ -47,7 +47,7 @@ directory. 
## Running `geth` Going through all the possible command line flags is out of scope here (please consult our -[CLI Wiki page](https://geth.ethereum.org/docs/interface/command-line-options)), +[CLI Wiki page](https://geth.ethereum.org/docs/fundamentals/command-line-options)), but we've enumerated a few common parameter combos to get you up to speed quickly on how you can run your own `geth` instance. @@ -82,10 +82,10 @@ This command will: * Start `geth` in snap sync mode (default, can be changed with the `--syncmode` flag), causing it to download more data in exchange for avoiding processing the entire history of the Ethereum network, which is very CPU intensive. - * Start the built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console), + * Start the built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interacting-with-geth/javascript-console), (via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://github.com/ChainSafe/web3.js/blob/0.20.7/DOCUMENTATION.md) (note: the `web3` version bundled within `geth` is very old, and not up to date with official docs), - as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server). + as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/interacting-with-geth/rpc). This tool is optional and if you leave it out you can always attach it to an already running `geth` instance with `geth attach`. @@ -175,7 +175,7 @@ accessible from the outside. As a developer, sooner rather than later you'll want to start interacting with `geth` and the Ethereum network via your own programs and not manually through the console. To aid this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://ethereum.github.io/execution-apis/api-documentation/) -and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)). +and [`geth` specific APIs](https://geth.ethereum.org/docs/interacting-with-geth/rpc)). These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based platforms, and named pipes on Windows). diff --git a/cmd/checkpoint-admin/README.md b/cmd/checkpoint-admin/README.md index 7c0c657eb5..1067ead056 100644 --- a/cmd/checkpoint-admin/README.md +++ b/cmd/checkpoint-admin/README.md @@ -46,7 +46,7 @@ Deploy checkpoint oracle contract. `--signers` indicates the specified trusted s checkpoint-admin deploy --rpc --clef --signer --signers --threshold 1 ``` -It is worth noting that checkpoint-admin only supports clef as a signer for transactions and plain text(checkpoint). For more clef usage, please see the clef [tutorial](https://geth.ethereum.org/docs/clef/tutorial) . +It is worth noting that checkpoint-admin only supports clef as a signer for transactions and plain text(checkpoint). For more clef usage, please see the clef [tutorial](https://geth.ethereum.org/docs/tools/clef/tutorial) . #### Sign diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go index 23f6fd277c..aeee6f9c9e 100644 --- a/cmd/geth/consolecmd.go +++ b/cmd/geth/consolecmd.go @@ -37,7 +37,7 @@ var ( Description: ` The Geth console is an interactive shell for the JavaScript runtime environment which exposes a node admin interface as well as the Ðapp JavaScript API. 
-See https://geth.ethereum.org/docs/interface/javascript-console.`,
+See https://geth.ethereum.org/docs/interacting-with-geth/javascript-console.`,
 	}

 	attachCommand = &cli.Command{
@@ -49,7 +49,7 @@ See https://geth.ethereum.org/docs/interface/javascript-console.`,
 		Description: `
 The Geth console is an interactive shell for the JavaScript runtime environment
 which exposes a node admin interface as well as the Ðapp JavaScript API.
-See https://geth.ethereum.org/docs/interface/javascript-console.
+See https://geth.ethereum.org/docs/interacting-with-geth/javascript-console.
 This command allows to open a console on a running geth node.`,
 	}

@@ -61,7 +61,7 @@ This command allows to open a console on a running geth node.`,
 		Flags: flags.Merge(nodeFlags, consoleFlags),
 		Description: `
 The JavaScript VM exposes a node admin interface as well as the Ðapp
-JavaScript API. See https://geth.ethereum.org/docs/interface/javascript-console`,
+JavaScript API. See https://geth.ethereum.org/docs/interacting-with-geth/javascript-console`,
 	}
 )

From 6bc68f8d94b359e08beefa2b6006e5ad003519f9 Mon Sep 17 00:00:00 2001
From: Jonathan Otto
Date: Tue, 14 Mar 2023 13:41:28 -0400
Subject: [PATCH 11/24] Increase websocket frame size (from erigon rpc client)
 (#26883)

This increases the maximum allowed message size to 32MB.
Originally submitted at https://github.com/ledgerwatch/erigon/pull/2739

Example block failure: https://etherscan.io/tx/0x1317d973a55cedf9b0f2df6ea48e8077dd176f5444a3423368a46d6e4db89982#internal
---
 rpc/websocket.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rpc/websocket.go b/rpc/websocket.go
index 0ac2a2792d..a6b95dd2ac 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -38,7 +38,7 @@ const (
 	wsPingInterval     = 30 * time.Second
 	wsPingWriteTimeout = 5 * time.Second
 	wsPongTimeout      = 30 * time.Second
-	wsMessageSizeLimit = 15 * 1024 * 1024
+	wsMessageSizeLimit = 32 * 1024 * 1024
 )

 var wsBufferPool = new(sync.Pool)

From f86913bc3e9a4f2439b6c3cd4d00cb364495238c Mon Sep 17 00:00:00 2001
From: Felix Lange
Date: Wed, 15 Mar 2023 14:34:36 +0100
Subject: [PATCH 12/24] cmd/devp2p, cmd/geth: add version in --help output
 (#26895)

Not sure why this was removed, it's pretty useful to see the version
also in --help.
---
 cmd/devp2p/main.go | 1 -
 cmd/geth/main.go   | 1 -
 2 files changed, 2 deletions(-)

diff --git a/cmd/devp2p/main.go b/cmd/devp2p/main.go
index 9e13d29ab7..8461a8b9b5 100644
--- a/cmd/devp2p/main.go
+++ b/cmd/devp2p/main.go
@@ -29,7 +29,6 @@ import (
 var app = flags.NewApp("go-ethereum devp2p tool")

 func init() {
-	app.HideVersion = true
 	app.Flags = append(app.Flags, debug.Flags...)
 	app.Before = func(ctx *cli.Context) error {
 		flags.MigrateGlobalFlags(ctx)
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 5ba0702498..a970e76523 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -205,7 +205,6 @@ var app = flags.NewApp("the go-ethereum command line interface")
 func init() {
 	// Initialize the CLI app and start Geth
 	app.Action = geth
-	app.HideVersion = true // we have a command to print the version
 	app.Copyright = "Copyright 2013-2023 The go-ethereum Authors"
 	app.Commands = []*cli.Command{
 		// See chaincmd.go:

From bba2a1bac5fb896e644ff6a5307419812e0f6819 Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Thu, 16 Mar 2023 15:12:34 +0800
Subject: [PATCH 13/24] core: show db error-info in case of mismatched hash
 root (#26870)

When a database failure occurs, bubble it up into the statedb and report
it in suitable places, such as during a 'bad block' report.
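For callers, the net effect is sketched below; this mirrors the ValidateState
change in this patch (`eip158` stands in for the usual fork check and is not
part of the diff):

```
// A mismatching state root can now be correlated with an underlying
// database failure, if one was memoized during execution.
if root := statedb.IntermediateRoot(eip158); header.Root != root {
	return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w",
		header.Root, root, statedb.Error())
}
```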
--- core/block_validator.go | 2 +- core/state/state_object.go | 47 ++++++++------------------------------ core/state/statedb.go | 10 ++++---- core/vm/interface.go | 2 -- 4 files changed, 17 insertions(+), 44 deletions(-) diff --git a/core/block_validator.go b/core/block_validator.go index db7ea35687..2a8df7c965 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -113,7 +113,7 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD // Validate the state root against the received state root and throw // an error if they don't match. if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { - return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root) + return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error()) } return nil } diff --git a/core/state/state_object.go b/core/state/state_object.go index 5dfd3c1b64..7e34cba44a 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -43,7 +43,6 @@ func (s Storage) String() (str string) { for key, value := range s { str += fmt.Sprintf("%X : %X\n", key, value) } - return } @@ -52,7 +51,6 @@ func (s Storage) Copy() Storage { for key, value := range s { cpy[key] = value } - return cpy } @@ -68,13 +66,6 @@ type stateObject struct { data types.StateAccount db *StateDB - // DB error. - // State objects are used by the consensus core and VM which are - // unable to deal with database-level errors. Any error that occurs - // during a database read is memoized here and will eventually be returned - // by StateDB.Commit. - dbErr error - // Write caches. trie Trie // storage trie, which becomes non-nil on first access code Code // contract bytecode, which gets set when code is loaded @@ -84,7 +75,7 @@ type stateObject struct { dirtyStorage Storage // Storage entries that have been modified in the current transaction execution // Cache flags. - // When an object is marked suicided it will be delete from the trie + // When an object is marked suicided it will be deleted from the trie // during the "update" phase of the state transition. dirtyCode bool // true if the code was updated suicided bool @@ -123,13 +114,6 @@ func (s *stateObject) EncodeRLP(w io.Writer) error { return rlp.Encode(w, &s.data) } -// setError remembers the first non-nil error it is called with. 
-func (s *stateObject) setError(err error) { - if s.dbErr == nil { - s.dbErr = err - } -} - func (s *stateObject) markSuicided() { s.suicided = true } @@ -214,7 +198,7 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has start := time.Now() tr, err := s.getTrie(db) if err != nil { - s.setError(err) + s.db.setError(err) return common.Hash{} } enc, err = tr.TryGet(key.Bytes()) @@ -222,7 +206,7 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has s.db.StorageReads += time.Since(start) } if err != nil { - s.setError(err) + s.db.setError(err) return common.Hash{} } } @@ -230,7 +214,7 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has if len(enc) > 0 { _, content, _, err := rlp.Split(enc) if err != nil { - s.setError(err) + s.db.setError(err) } value.SetBytes(content) } @@ -296,7 +280,7 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) { ) tr, err := s.getTrie(db) if err != nil { - s.setError(err) + s.db.setError(err) return nil, err } // Insert all the pending updates into the trie @@ -311,7 +295,7 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) { var v []byte if (value == common.Hash{}) { if err := tr.TryDelete(key[:]); err != nil { - s.setError(err) + s.db.setError(err) return nil, err } s.db.StorageDeleted += 1 @@ -319,7 +303,7 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) { // Encoding []byte cannot fail, ok to ignore the error. v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) if err := tr.TryUpdate(key[:], v); err != nil { - s.setError(err) + s.db.setError(err) return nil, err } s.db.StorageUpdated += 1 @@ -351,7 +335,6 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) { func (s *stateObject) updateRoot(db Database) { tr, err := s.updateTrie(db) if err != nil { - s.setError(fmt.Errorf("updateRoot (%x) error: %w", s.address, err)) return } // If nothing changed, don't bother with hashing anything @@ -372,9 +355,6 @@ func (s *stateObject) commitTrie(db Database) (*trie.NodeSet, error) { if err != nil { return nil, err } - if s.dbErr != nil { - return nil, s.dbErr - } // If nothing changed, don't bother with committing anything if tr == nil { return nil, nil @@ -385,7 +365,7 @@ func (s *stateObject) commitTrie(db Database) (*trie.NodeSet, error) { } root, nodes := tr.Commit(false) s.data.Root = root - return nodes, err + return nodes, nil } // AddBalance adds amount to s's balance. @@ -457,7 +437,7 @@ func (s *stateObject) Code(db Database) []byte { } code, err := db.ContractCode(s.addrHash, common.BytesToHash(s.CodeHash())) if err != nil { - s.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err)) + s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err)) } s.code = code return code @@ -475,7 +455,7 @@ func (s *stateObject) CodeSize(db Database) int { } size, err := db.ContractCodeSize(s.addrHash, common.BytesToHash(s.CodeHash())) if err != nil { - s.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err)) + s.db.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err)) } return size } @@ -519,10 +499,3 @@ func (s *stateObject) Balance() *big.Int { func (s *stateObject) Nonce() uint64 { return s.data.Nonce } - -// Value is never called, but must be present to allow stateObject to be used -// as a vm.Account interface that also satisfies the vm.ContractRef -// interface. Interfaces are awesome. 
-func (s *stateObject) Value() *big.Int {
-	panic("Value on stateObject should never be called")
-}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 247aef8b23..54d5040451 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -81,8 +81,10 @@ type StateDB struct {
 	// DB error.
 	// State objects are used by the consensus core and VM which are
 	// unable to deal with database-level errors. Any error that occurs
-	// during a database read is memoized here and will eventually be returned
-	// by StateDB.Commit.
+	// during a database read is memoized here and will eventually be
+	// returned by StateDB.Commit. Notably, this error is also shared
+	// by all cached state objects in case the database failure occurs
+	// when accessing account state.
 	dbErr error

 	// The refund counter, also used by state transitioning.
@@ -187,6 +189,7 @@ func (s *StateDB) setError(err error) {
 	}
 }

+// Error returns the memoized database failure that occurred earlier.
 func (s *StateDB) Error() error {
 	return s.dbErr
 }
@@ -478,13 +481,11 @@ func (s *StateDB) SetTransientState(addr common.Address, key, value common.Hash)
 	if prev == value {
 		return
 	}
-
 	s.journal.append(transientStorageChange{
 		account:  &addr,
 		key:      key,
 		prevalue: prev,
 	})
-
 	s.setTransientState(addr, key, value)
 }

@@ -957,6 +958,7 @@ func (s *StateDB) clearJournalAndRefund() {

 // Commit writes the state to the underlying in-memory trie database.
 func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
+	// Short circuit in case any database failure occurred earlier.
 	if s.dbErr != nil {
 		return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
 	}
diff --git a/core/vm/interface.go b/core/vm/interface.go
index 0ee32b1dd5..b83f78307e 100644
--- a/core/vm/interface.go
+++ b/core/vm/interface.go
@@ -76,8 +76,6 @@ type StateDB interface {

 	AddLog(*types.Log)
 	AddPreimage(common.Hash, []byte)
-
-	ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error
 }

 // CallContext provides a basic interface for the EVM calling conventions. The EVM

From 48d1bf06780799304b96d8c0fea3cc61941da4df Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Fri, 17 Mar 2023 03:34:25 +0800
Subject: [PATCH 14/24] consensus: improve consensus engine definition (#26871)

Makes clear the distinction between Finalize and FinalizeAndAssemble:

- In the Finalize function, a series of state operations are applied
  according to consensus rules. The statedb is mutated and the root hash
  can be checked and compared afterwards. This function should be used
  in block processing (receiving a block from the network and applying
  it locally) but not in block generation.

- In the FinalizeAndAssemble function, after applying state mutations
  the block is also assembled, with the newly computed state root set in
  the header. This function should be used in block generation only.
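As a rough sketch of the intended call pattern (identifiers illustrative, not
part of this change):

```
// Verification path: mutate the state only; the block validator later
// compares header.Root against statedb.IntermediateRoot.
engine.Finalize(chain, header, statedb, txs, uncles, withdrawals)

// Generation path: finalize, set the state root and assemble in one step.
block, err := engine.FinalizeAndAssemble(chain, header, statedb, txs, uncles, receipts, withdrawals)
```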
---
 consensus/beacon/consensus.go | 15 +++++-----
 consensus/clique/clique.go    | 14 ++++-----
 consensus/consensus.go        | 10 +++----
 consensus/ethash/consensus.go | 11 ++++----
 core/block_validator.go       |  6 ++--
 core/blockchain.go            | 53 ++++++++++++++++++-----------------
 6 files changed, 54 insertions(+), 55 deletions(-)

diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go
index eb5aa58ca8..b4da9b553a 100644
--- a/consensus/beacon/consensus.go
+++ b/consensus/beacon/consensus.go
@@ -326,10 +326,8 @@ func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.H
 	return nil
 }

-// Finalize implements consensus.Engine, setting the final state on the header
+// Finalize implements consensus.Engine and processes withdrawals on top.
 func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, withdrawals []*types.Withdrawal) {
-	// Finalize is different with Prepare, it can be used in both block generation
-	// and verification. So determine the consensus rules by header type.
 	if !beacon.IsPoSHeader(header) {
 		beacon.ethone.Finalize(chain, header, state, txs, uncles, nil)
 		return
@@ -341,16 +339,12 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.
 		amount = amount.Mul(amount, big.NewInt(params.GWei))
 		state.AddBalance(w.Address, amount)
 	}
-	// The block reward is no longer handled here. It's done by the
-	// external consensus engine.
-	header.Root = state.IntermediateRoot(true)
+	// No block reward; it is issued by the consensus layer instead.
 }

 // FinalizeAndAssemble implements consensus.Engine, setting the final state and
 // assembling the block.
 func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, withdrawals []*types.Withdrawal) (*types.Block, error) {
-	// FinalizeAndAssemble is different with Prepare, it can be used in both block
-	// generation and verification. So determine the consensus rules by header type.
 	if !beacon.IsPoSHeader(header) {
 		return beacon.ethone.FinalizeAndAssemble(chain, header, state, txs, uncles, receipts, nil)
 	}
@@ -367,6 +361,11 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
 	}
 	// Finalize and assemble the block.
 	beacon.Finalize(chain, header, state, txs, uncles, withdrawals)
+
+	// Assign the final state root to header.
+	header.Root = state.IntermediateRoot(true)
+
+	// Assemble and return the final block.
 	return types.NewBlockWithWithdrawals(header, txs, uncles, receipts, withdrawals, trie.NewStackTrie(nil)), nil
 }

diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index 4706bbac1c..6c20803b2a 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -565,12 +565,10 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header
 	return nil
 }

-// Finalize implements consensus.Engine, ensuring no uncles are set, nor block
-// rewards given.
+// Finalize implements consensus.Engine. There are no post-transaction
+// consensus rules in clique, so nothing is done here.
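+// The state root, previously computed here, is now assigned in
+// FinalizeAndAssemble, and the uncle hash is derived during block assembly.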
 func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, withdrawals []*types.Withdrawal) {
-	// No block rewards in PoA, so the state remains as is and uncles are dropped
-	header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
-	header.UncleHash = types.CalcUncleHash(nil)
+	// No block rewards in PoA, so the state remains as is
 }

 // FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
@@ -579,11 +577,13 @@ func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
 	if len(withdrawals) > 0 {
 		return nil, errors.New("clique does not support withdrawals")
 	}
-
 	// Finalize block
 	c.Finalize(chain, header, state, txs, uncles, nil)

-	// Assemble and return the final block for sealing
+	// Assign the final state root to header.
+	header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
+
+	// Assemble and return the final block for sealing.
 	return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)), nil
 }

diff --git a/consensus/consensus.go b/consensus/consensus.go
index 190d5ae12c..a9fd8dd1cf 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -84,16 +84,16 @@ type Engine interface {
 	// rules of a particular engine. The changes are executed inline.
 	Prepare(chain ChainHeaderReader, header *types.Header) error

-	// Finalize runs any post-transaction state modifications (e.g. block rewards)
-	// but does not assemble the block.
+	// Finalize runs any post-transaction state modifications (e.g. block rewards
+	// or processing withdrawals) but does not assemble the block.
 	//
-	// Note: The block header and state database might be updated to reflect any
-	// consensus rules that happen at finalization (e.g. block rewards).
+	// Note: The state database might be updated to reflect any consensus rules
+	// that happen at finalization (e.g. block rewards).
 	Finalize(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
 		uncles []*types.Header, withdrawals []*types.Withdrawal)

 	// FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
-	// rewards) and assembles the final block.
+	// rewards or processing withdrawals) and assembles the final block.
 	//
 	// Note: The block header and state database might be updated to reflect any
 	// consensus rules that happen at finalization (e.g. block rewards).
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index da29e16597..c3c06c541c 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -598,12 +598,10 @@ func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.H
 	return nil
 }

-// Finalize implements consensus.Engine, accumulating the block and uncle rewards,
-// setting the final state on the header
+// Finalize implements consensus.Engine, accumulating the block and uncle rewards.
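+// The state root is intentionally no longer set here; FinalizeAndAssemble
+// assigns it once the rewards have been applied to the state.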
func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, withdrawals []*types.Withdrawal) { - // Accumulate any block and uncle rewards and commit the final state root + // Accumulate any block and uncle rewards accumulateRewards(chain.Config(), state, header, uncles) - header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) } // FinalizeAndAssemble implements consensus.Engine, accumulating the block and @@ -612,9 +610,12 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea if len(withdrawals) > 0 { return nil, errors.New("ethash does not support withdrawals") } - // Finalize block ethash.Finalize(chain, header, state, txs, uncles, nil) + + // Assign the final state root to header. + header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) + // Header seems complete, assemble into a block and return return types.NewBlock(header, txs, uncles, receipts, trie.NewStackTrie(nil)), nil } diff --git a/core/block_validator.go b/core/block_validator.go index 2a8df7c965..bcb228830d 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -90,10 +90,8 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { return nil } -// ValidateState validates the various changes that happen after a state -// transition, such as amount of used gas, the receipt roots and the state root -// itself. ValidateState returns a database batch if the validation was a success -// otherwise nil and an error is returned. +// ValidateState validates the various changes that happen after a state transition, +// such as amount of used gas, the receipt roots and the state root itself. func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64) error { header := block.Header() if block.GasUsed() != usedGas { diff --git a/core/blockchain.go b/core/blockchain.go index 1fe7d73e00..8fc520e776 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1752,44 +1752,45 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) } // Process block using the parent state as reference point - substart := time.Now() + pstart := time.Now() receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig) if err != nil { bc.reportBlock(block, receipts, err) atomic.StoreUint32(&followupInterrupt, 1) return it.index, err } + ptime := time.Since(pstart) - // Update the metrics touched during block processing - accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them - storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete, we can mark them - accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them - storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them - snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them - snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them - triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation - trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates - trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates - - 
blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash) - - // Validate the state using the default validator - substart = time.Now() + vstart := time.Now() if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil { bc.reportBlock(block, receipts, err) atomic.StoreUint32(&followupInterrupt, 1) return it.index, err } - proctime := time.Since(start) - - // Update the metrics touched during block validation - accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them - storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them - blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash)) + vtime := time.Since(vstart) + proctime := time.Since(start) // processing + validation + + // Update the metrics touched during block processing and validation + accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing) + storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing) + snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete(in processing) + snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete(in processing) + accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation) + storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation) + accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation) + storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete(in validation) + triehash := statedb.AccountHashes + statedb.StorageHashes // The time spent on tries hashing + trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update + trieRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read + trieRead += statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read + blockExecutionTimer.Update(ptime - trieRead) // The time spent on EVM processing + blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation // Write the block to the chain and get the status. 
- substart = time.Now() - var status WriteStatus + var ( + wstart = time.Now() + status WriteStatus + ) if !setHead { // Don't set the head, only insert the block err = bc.writeBlockWithState(block, receipts, statedb) @@ -1804,9 +1805,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them - triedbCommitTimer.Update(statedb.TrieDBCommits) // Triedb commits are complete, we can mark them + triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them - blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits) + blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits) blockInsertTimer.UpdateSince(start) // Report the import stats before returning the various results From d8066dcde801baed8ebc1605bd3bfaefa781e8e3 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Thu, 16 Mar 2023 20:35:36 +0100 Subject: [PATCH 15/24] eth/catalyst: increase update consensus timeout (#26840) Increases the time between consensus updates that we give the CL before we start warning the user. --- eth/catalyst/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index fc7529c8f5..9077f20bff 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -72,7 +72,7 @@ const ( // beaconUpdateConsensusTimeout is the max time allowed for a beacon client // to send a consensus update before it's considered offline and the user is // warned. - beaconUpdateConsensusTimeout = 30 * time.Second + beaconUpdateConsensusTimeout = 2 * time.Minute // beaconUpdateWarnFrequency is the frequency at which to warn the user that // the beacon client is offline. From f73365738309d9b3249efca9c1492856bf6e7848 Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Thu, 16 Mar 2023 15:53:39 -0700 Subject: [PATCH 16/24] internal/ethapi: avoid int overflow in GetTransactionReceipt (#26911) --- internal/ethapi/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 93a2d1264d..0cf29e5fe6 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1626,7 +1626,7 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common. 
if err != nil { return nil, err } - if len(receipts) <= int(index) { + if uint64(len(receipts)) <= index { return nil, nil } receipt := receipts[index] From b7bfbc1e649cecc9fa11ad55b3943272a919c24b Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Fri, 17 Mar 2023 03:19:51 -0700 Subject: [PATCH 17/24] trie, accounts/abi: add error-checks (#26914) --- accounts/abi/abi.go | 5 ++++- trie/stacktrie.go | 8 ++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go index 841d3c6cb6..62b860e18c 100644 --- a/accounts/abi/abi.go +++ b/accounts/abi/abi.go @@ -246,7 +246,10 @@ func UnpackRevert(data []byte) (string, error) { if !bytes.Equal(data[:4], revertSelector) { return "", errors.New("invalid data for unpacking") } - typ, _ := NewType("string", "", nil) + typ, err := NewType("string", "", nil) + if err != nil { + return "", err + } unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:]) if err != nil { return "", err diff --git a/trie/stacktrie.go b/trie/stacktrie.go index e7b3171af6..83034e29a5 100644 --- a/trie/stacktrie.go +++ b/trie/stacktrie.go @@ -144,7 +144,9 @@ func (st *StackTrie) unmarshalBinary(r io.Reader) error { Val []byte Key []byte } - gob.NewDecoder(r).Decode(&dec) + if err := gob.NewDecoder(r).Decode(&dec); err != nil { + return err + } st.owner = dec.Owner st.nodeType = dec.NodeType st.val = dec.Val @@ -158,7 +160,9 @@ func (st *StackTrie) unmarshalBinary(r io.Reader) error { continue } var child StackTrie - child.unmarshalBinary(r) + if err := child.unmarshalBinary(r); err != nil { + return err + } st.children[i] = &child } return nil From 58d0f6440ba82a0cbf327ca77b7cd795c2d687c6 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Fri, 17 Mar 2023 06:51:55 -0400 Subject: [PATCH 18/24] rlp: support for uint256 (#26898) This adds built-in support in package rlp for encoding, decoding and generating code dealing with uint256.Int. --------- Co-authored-by: Felix Lange --- rlp/decode.go | 64 +++++++++++++++++++++++++++++ rlp/decode_test.go | 42 +++++++++++++++++++ rlp/encbuffer.go | 25 +++++++++++ rlp/encode.go | 21 ++++++++++ rlp/encode_test.go | 49 ++++++++++++++++++++++ rlp/rlpgen/gen.go | 51 ++++++++++++++++++++++- rlp/rlpgen/gen_test.go | 2 +- rlp/rlpgen/testdata/uint256.in.txt | 10 +++++ rlp/rlpgen/testdata/uint256.out.txt | 44 ++++++++++++++++++++ rlp/rlpgen/types.go | 10 +++++ 10 files changed, 316 insertions(+), 2 deletions(-) create mode 100644 rlp/rlpgen/testdata/uint256.in.txt create mode 100644 rlp/rlpgen/testdata/uint256.out.txt diff --git a/rlp/decode.go b/rlp/decode.go index c9b2652414..c9b50e8c18 100644 --- a/rlp/decode.go +++ b/rlp/decode.go @@ -29,6 +29,7 @@ import ( "sync" "github.com/ethereum/go-ethereum/rlp/internal/rlpstruct" + "github.com/holiman/uint256" ) //lint:ignore ST1012 EOL is not an error. 
@@ -52,6 +53,7 @@ var ( errUintOverflow = errors.New("rlp: uint overflow") errNoPointer = errors.New("rlp: interface given to Decode must be a pointer") errDecodeIntoNil = errors.New("rlp: pointer given to Decode must not be nil") + errUint256Large = errors.New("rlp: value too large for uint256") streamPool = sync.Pool{ New: func() interface{} { return new(Stream) }, @@ -148,6 +150,7 @@ func addErrorContext(err error, ctx string) error { var ( decoderInterface = reflect.TypeOf(new(Decoder)).Elem() bigInt = reflect.TypeOf(big.Int{}) + u256Int = reflect.TypeOf(uint256.Int{}) ) func makeDecoder(typ reflect.Type, tags rlpstruct.Tags) (dec decoder, err error) { @@ -159,6 +162,10 @@ func makeDecoder(typ reflect.Type, tags rlpstruct.Tags) (dec decoder, err error) return decodeBigInt, nil case typ.AssignableTo(bigInt): return decodeBigIntNoPtr, nil + case typ == reflect.PtrTo(u256Int): + return decodeU256, nil + case typ == u256Int: + return decodeU256NoPtr, nil case kind == reflect.Ptr: return makePtrDecoder(typ, tags) case reflect.PtrTo(typ).Implements(decoderInterface): @@ -235,6 +242,24 @@ func decodeBigInt(s *Stream, val reflect.Value) error { return nil } +func decodeU256NoPtr(s *Stream, val reflect.Value) error { + return decodeU256(s, val.Addr()) +} + +func decodeU256(s *Stream, val reflect.Value) error { + i := val.Interface().(*uint256.Int) + if i == nil { + i = new(uint256.Int) + val.Set(reflect.ValueOf(i)) + } + + err := s.ReadUint256(i) + if err != nil { + return wrapStreamError(err, val.Type()) + } + return nil +} + func makeListDecoder(typ reflect.Type, tag rlpstruct.Tags) (decoder, error) { etype := typ.Elem() if etype.Kind() == reflect.Uint8 && !reflect.PtrTo(etype).Implements(decoderInterface) { @@ -863,6 +888,45 @@ func (s *Stream) decodeBigInt(dst *big.Int) error { return nil } +// ReadUint256 decodes the next value as a uint256. +func (s *Stream) ReadUint256(dst *uint256.Int) error { + var buffer []byte + kind, size, err := s.Kind() + switch { + case err != nil: + return err + case kind == List: + return ErrExpectedString + case kind == Byte: + buffer = s.uintbuf[:1] + buffer[0] = s.byteval + s.kind = -1 // re-arm Kind + case size == 0: + // Avoid zero-length read. + s.kind = -1 + case size <= uint64(len(s.uintbuf)): + // All possible uint256 values fit into s.uintbuf. + buffer = s.uintbuf[:size] + if err := s.readFull(buffer); err != nil { + return err + } + // Reject inputs where single byte encoding should have been used. + if size == 1 && buffer[0] < 128 { + return ErrCanonSize + } + default: + return errUint256Large + } + + // Reject leading zero bytes. + if len(buffer) > 0 && buffer[0] == 0 { + return ErrCanonInt + } + // Set the integer bytes. + dst.SetBytes(buffer) + return nil +} + // Decode decodes a value and stores the result in the value pointed // to by val. Please see the documentation for the Decode function // to learn about the decoding rules. 
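For context, decoding with the new stream method might look like the minimal
sketch below (a hypothetical caller; only ReadUint256 comes from this patch):

```
var x uint256.Int
s := rlp.NewStream(bytes.NewReader(input), 0)
if err := s.ReadUint256(&x); err != nil {
	return err // inputs longer than 32 bytes fail with errUint256Large
}
```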
diff --git a/rlp/decode_test.go b/rlp/decode_test.go index dbcfcffed1..07d9c579a6 100644 --- a/rlp/decode_test.go +++ b/rlp/decode_test.go @@ -28,6 +28,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/common/math" + "github.com/holiman/uint256" ) func TestStreamKind(t *testing.T) { @@ -468,6 +469,10 @@ var ( veryVeryBigInt = new(big.Int).Exp(veryBigInt, big.NewInt(8), nil) ) +var ( + veryBigInt256, _ = uint256.FromBig(veryBigInt) +) + var decodeTests = []decodeTest{ // booleans {input: "01", ptr: new(bool), value: true}, @@ -541,11 +546,27 @@ var decodeTests = []decodeTest{ {input: "89FFFFFFFFFFFFFFFFFF", ptr: new(*big.Int), value: veryBigInt}, {input: "B848FFFFFFFFFFFFFFFFF800000000000000001BFFFFFFFFFFFFFFFFC8000000000000000045FFFFFFFFFFFFFFFFC800000000000000001BFFFFFFFFFFFFFFFFF8000000000000000001", ptr: new(*big.Int), value: veryVeryBigInt}, {input: "10", ptr: new(big.Int), value: *big.NewInt(16)}, // non-pointer also works + + // big int errors {input: "C0", ptr: new(*big.Int), error: "rlp: expected input string or byte for *big.Int"}, {input: "00", ptr: new(*big.Int), error: "rlp: non-canonical integer (leading zero bytes) for *big.Int"}, {input: "820001", ptr: new(*big.Int), error: "rlp: non-canonical integer (leading zero bytes) for *big.Int"}, {input: "8105", ptr: new(*big.Int), error: "rlp: non-canonical size information for *big.Int"}, + // uint256 + {input: "80", ptr: new(*uint256.Int), value: uint256.NewInt(0)}, + {input: "01", ptr: new(*uint256.Int), value: uint256.NewInt(1)}, + {input: "88FFFFFFFFFFFFFFFF", ptr: new(*uint256.Int), value: uint256.NewInt(math.MaxUint64)}, + {input: "89FFFFFFFFFFFFFFFFFF", ptr: new(*uint256.Int), value: veryBigInt256}, + {input: "10", ptr: new(uint256.Int), value: *uint256.NewInt(16)}, // non-pointer also works + + // uint256 errors + {input: "C0", ptr: new(*uint256.Int), error: "rlp: expected input string or byte for *uint256.Int"}, + {input: "00", ptr: new(*uint256.Int), error: "rlp: non-canonical integer (leading zero bytes) for *uint256.Int"}, + {input: "820001", ptr: new(*uint256.Int), error: "rlp: non-canonical integer (leading zero bytes) for *uint256.Int"}, + {input: "8105", ptr: new(*uint256.Int), error: "rlp: non-canonical size information for *uint256.Int"}, + {input: "A1FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF00", ptr: new(*uint256.Int), error: "rlp: value too large for uint256"}, + // structs { input: "C50583343434", @@ -1223,6 +1244,27 @@ func BenchmarkDecodeBigInts(b *testing.B) { } } +func BenchmarkDecodeU256Ints(b *testing.B) { + ints := make([]*uint256.Int, 200) + for i := range ints { + ints[i], _ = uint256.FromBig(math.BigPow(2, int64(i))) + } + enc, err := EncodeToBytes(ints) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(enc))) + b.ReportAllocs() + b.ResetTimer() + + var out []*uint256.Int + for i := 0; i < b.N; i++ { + if err := DecodeBytes(enc, &out); err != nil { + b.Fatal(err) + } + } +} + func encodeTestSlice(n uint) []byte { s := make([]uint, n) for i := uint(0); i < n; i++ { diff --git a/rlp/encbuffer.go b/rlp/encbuffer.go index d2c6d93bca..9ac4fcb2ca 100644 --- a/rlp/encbuffer.go +++ b/rlp/encbuffer.go @@ -17,10 +17,13 @@ package rlp import ( + "encoding/binary" "io" "math/big" "reflect" "sync" + + "github.com/holiman/uint256" ) type encBuffer struct { @@ -169,6 +172,23 @@ func (w *encBuffer) writeBigInt(i *big.Int) { } } +// writeUint256 writes z as an integer. 
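+// Values that fit in 64 bits take the writeUint64 fast path; larger values
+// are emitted big-endian behind a one-byte string header (at most 33 bytes).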
+func (w *encBuffer) writeUint256(z *uint256.Int) { + bitlen := z.BitLen() + if bitlen <= 64 { + w.writeUint64(z.Uint64()) + return + } + nBytes := byte((bitlen + 7) / 8) + var b [33]byte + binary.BigEndian.PutUint64(b[1:9], z[3]) + binary.BigEndian.PutUint64(b[9:17], z[2]) + binary.BigEndian.PutUint64(b[17:25], z[1]) + binary.BigEndian.PutUint64(b[25:33], z[0]) + b[32-nBytes] = 0x80 + nBytes + w.str = append(w.str, b[32-nBytes:]...) +} + // list adds a new list header to the header stack. It returns the index of the header. // Call listEnd with this index after encoding the content of the list. func (buf *encBuffer) list() int { @@ -376,6 +396,11 @@ func (w EncoderBuffer) WriteBigInt(i *big.Int) { w.buf.writeBigInt(i) } +// WriteUint256 encodes uint256.Int as an RLP string. +func (w EncoderBuffer) WriteUint256(i *uint256.Int) { + w.buf.writeUint256(i) +} + // WriteBytes encodes b as an RLP string. func (w EncoderBuffer) WriteBytes(b []byte) { w.buf.writeBytes(b) diff --git a/rlp/encode.go b/rlp/encode.go index a377a1ef4c..3fac0bd2d3 100644 --- a/rlp/encode.go +++ b/rlp/encode.go @@ -24,6 +24,7 @@ import ( "reflect" "github.com/ethereum/go-ethereum/rlp/internal/rlpstruct" + "github.com/holiman/uint256" ) var ( @@ -144,6 +145,10 @@ func makeWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) { return writeBigIntPtr, nil case typ.AssignableTo(bigInt): return writeBigIntNoPtr, nil + case typ == reflect.PtrTo(u256Int): + return writeU256IntPtr, nil + case typ == u256Int: + return writeU256IntNoPtr, nil case kind == reflect.Ptr: return makePtrWriter(typ, ts) case reflect.PtrTo(typ).Implements(encoderInterface): @@ -206,6 +211,22 @@ func writeBigIntNoPtr(val reflect.Value, w *encBuffer) error { return nil } +func writeU256IntPtr(val reflect.Value, w *encBuffer) error { + ptr := val.Interface().(*uint256.Int) + if ptr == nil { + w.str = append(w.str, 0x80) + return nil + } + w.writeUint256(ptr) + return nil +} + +func writeU256IntNoPtr(val reflect.Value, w *encBuffer) error { + i := val.Interface().(uint256.Int) + w.writeUint256(&i) + return nil +} + func writeBytes(val reflect.Value, w *encBuffer) error { w.writeBytes(val.Bytes()) return nil diff --git a/rlp/encode_test.go b/rlp/encode_test.go index 82c490a802..02be47d0ef 100644 --- a/rlp/encode_test.go +++ b/rlp/encode_test.go @@ -27,6 +27,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/common/math" + "github.com/holiman/uint256" ) type testEncoder struct { @@ -147,6 +148,30 @@ var encTests = []encTest{ {val: big.NewInt(-1), error: "rlp: cannot encode negative big.Int"}, {val: *big.NewInt(-1), error: "rlp: cannot encode negative big.Int"}, + // uint256 + {val: uint256.NewInt(0), output: "80"}, + {val: uint256.NewInt(1), output: "01"}, + {val: uint256.NewInt(127), output: "7F"}, + {val: uint256.NewInt(128), output: "8180"}, + {val: uint256.NewInt(256), output: "820100"}, + {val: uint256.NewInt(1024), output: "820400"}, + {val: uint256.NewInt(0xFFFFFF), output: "83FFFFFF"}, + {val: uint256.NewInt(0xFFFFFFFF), output: "84FFFFFFFF"}, + {val: uint256.NewInt(0xFFFFFFFFFF), output: "85FFFFFFFFFF"}, + {val: uint256.NewInt(0xFFFFFFFFFFFF), output: "86FFFFFFFFFFFF"}, + {val: uint256.NewInt(0xFFFFFFFFFFFFFF), output: "87FFFFFFFFFFFFFF"}, + { + val: new(uint256.Int).SetBytes(unhex("102030405060708090A0B0C0D0E0F2")), + output: "8F102030405060708090A0B0C0D0E0F2", + }, + { + val: new(uint256.Int).SetBytes(unhex("0100020003000400050006000700080009000A000B000C000D000E01")), + output: "9C0100020003000400050006000700080009000A000B000C000D000E01", 
+ }, + // non-pointer uint256.Int + {val: *uint256.NewInt(0), output: "80"}, + {val: *uint256.NewInt(0xFFFFFF), output: "83FFFFFF"}, + // byte arrays {val: [0]byte{}, output: "80"}, {val: [1]byte{0}, output: "00"}, @@ -256,6 +281,12 @@ var encTests = []encTest{ output: "F90200CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376", }, + // Non-byte arrays are encoded as lists. + // Note that it is important to test [4]uint64 specifically, + // because that's the underlying type of uint256.Int. + {val: [4]uint32{1, 2, 3, 4}, output: "C401020304"}, + {val: [4]uint64{1, 2, 3, 4}, output: "C401020304"}, + // RawValue {val: RawValue(unhex("01")), output: "01"}, {val: RawValue(unhex("82FFFF")), output: "82FFFF"}, @@ -301,6 +332,7 @@ var encTests = []encTest{ {val: (*[]byte)(nil), output: "80"}, {val: (*[10]byte)(nil), output: "80"}, {val: (*big.Int)(nil), output: "80"}, + {val: (*uint256.Int)(nil), output: "80"}, {val: (*[]string)(nil), output: "C0"}, {val: (*[10]string)(nil), output: "C0"}, {val: (*[]interface{})(nil), output: "C0"}, @@ -509,6 +541,23 @@ func BenchmarkEncodeBigInts(b *testing.B) { } } +func BenchmarkEncodeU256Ints(b *testing.B) { + ints := make([]*uint256.Int, 200) + for i := range ints { + ints[i], _ = uint256.FromBig(math.BigPow(2, int64(i))) + } + out := bytes.NewBuffer(make([]byte, 0, 4096)) + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + out.Reset() + if err := Encode(out, ints); err != nil { + b.Fatal(err) + } + } +} + func BenchmarkEncodeConcurrentInterface(b *testing.B) { type struct1 struct { A string diff --git a/rlp/rlpgen/gen.go b/rlp/rlpgen/gen.go index 1deb5a93c2..0c65864826 100644 --- a/rlp/rlpgen/gen.go +++ b/rlp/rlpgen/gen.go @@ -283,7 +283,7 @@ func (op byteArrayOp) genDecode(ctx *genContext) (string, string) { return resultV, b.String() } -// bigIntNoPtrOp handles non-pointer big.Int. +// bigIntOp handles big.Int. // This exists because big.Int has it's own decoder operation on rlp.Stream, // but the decode method returns *big.Int, so it needs to be dereferenced. type bigIntOp struct { @@ -330,6 +330,49 @@ func (op bigIntOp) genDecode(ctx *genContext) (string, string) { return result, b.String() } +// uint256Op handles "github.com/holiman/uint256".Int +type uint256Op struct { + pointer bool +} + +func (op uint256Op) genWrite(ctx *genContext, v string) string { + var b bytes.Buffer + + dst := v + if !op.pointer { + dst = "&" + v + } + fmt.Fprintf(&b, "w.WriteUint256(%s)\n", dst) + + // Wrap with nil check. 
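+	// A nil pointer is encoded as the canonical empty RLP string (0x80),
+	// matching the existing behavior of *big.Int fields.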
+ if op.pointer { + code := b.String() + b.Reset() + fmt.Fprintf(&b, "if %s == nil {\n", v) + fmt.Fprintf(&b, " w.Write(rlp.EmptyString)") + fmt.Fprintf(&b, "} else {\n") + fmt.Fprint(&b, code) + fmt.Fprintf(&b, "}\n") + } + + return b.String() +} + +func (op uint256Op) genDecode(ctx *genContext) (string, string) { + ctx.addImport("github.com/holiman/uint256") + + var b bytes.Buffer + resultV := ctx.temp() + fmt.Fprintf(&b, "var %s uint256.Int\n", resultV) + fmt.Fprintf(&b, "if err := dec.ReadUint256(&%s); err != nil { return err }\n", resultV) + + result := resultV + if op.pointer { + result = "&" + resultV + } + return result, b.String() +} + // encoderDecoderOp handles rlp.Encoder and rlp.Decoder. // In order to be used with this, the type must implement both interfaces. // This restriction may be lifted in the future by creating separate ops for @@ -635,6 +678,9 @@ func (bctx *buildContext) makeOp(name *types.Named, typ types.Type, tags rlpstru if isBigInt(typ) { return bigIntOp{}, nil } + if isUint256(typ) { + return uint256Op{}, nil + } if typ == bctx.rawValueType { return bctx.makeRawValueOp(), nil } @@ -647,6 +693,9 @@ func (bctx *buildContext) makeOp(name *types.Named, typ types.Type, tags rlpstru if isBigInt(typ.Elem()) { return bigIntOp{pointer: true}, nil } + if isUint256(typ.Elem()) { + return uint256Op{pointer: true}, nil + } // Encoder/Decoder interfaces. if bctx.isEncoder(typ) { if bctx.isDecoder(typ) { diff --git a/rlp/rlpgen/gen_test.go b/rlp/rlpgen/gen_test.go index 241c34b6df..ff3b8385ae 100644 --- a/rlp/rlpgen/gen_test.go +++ b/rlp/rlpgen/gen_test.go @@ -47,7 +47,7 @@ func init() { } } -var tests = []string{"uints", "nil", "rawvalue", "optional", "bigint"} +var tests = []string{"uints", "nil", "rawvalue", "optional", "bigint", "uint256"} func TestOutput(t *testing.T) { for _, test := range tests { diff --git a/rlp/rlpgen/testdata/uint256.in.txt b/rlp/rlpgen/testdata/uint256.in.txt new file mode 100644 index 0000000000..ed16e0a788 --- /dev/null +++ b/rlp/rlpgen/testdata/uint256.in.txt @@ -0,0 +1,10 @@ +// -*- mode: go -*- + +package test + +import "github.com/holiman/uint256" + +type Test struct { + Int *uint256.Int + IntNoPtr uint256.Int +} diff --git a/rlp/rlpgen/testdata/uint256.out.txt b/rlp/rlpgen/testdata/uint256.out.txt new file mode 100644 index 0000000000..5e6d3ed992 --- /dev/null +++ b/rlp/rlpgen/testdata/uint256.out.txt @@ -0,0 +1,44 @@ +package test + +import "github.com/ethereum/go-ethereum/rlp" +import "github.com/holiman/uint256" +import "io" + +func (obj *Test) EncodeRLP(_w io.Writer) error { + w := rlp.NewEncoderBuffer(_w) + _tmp0 := w.List() + if obj.Int == nil { + w.Write(rlp.EmptyString) + } else { + w.WriteUint256(obj.Int) + } + w.WriteUint256(&obj.IntNoPtr) + w.ListEnd(_tmp0) + return w.Flush() +} + +func (obj *Test) DecodeRLP(dec *rlp.Stream) error { + var _tmp0 Test + { + if _, err := dec.List(); err != nil { + return err + } + // Int: + var _tmp1 uint256.Int + if err := dec.ReadUint256(&_tmp1); err != nil { + return err + } + _tmp0.Int = &_tmp1 + // IntNoPtr: + var _tmp2 uint256.Int + if err := dec.ReadUint256(&_tmp2); err != nil { + return err + } + _tmp0.IntNoPtr = _tmp2 + if err := dec.ListEnd(); err != nil { + return err + } + } + *obj = _tmp0 + return nil +} diff --git a/rlp/rlpgen/types.go b/rlp/rlpgen/types.go index 19694262e5..ea7dc96d88 100644 --- a/rlp/rlpgen/types.go +++ b/rlp/rlpgen/types.go @@ -97,6 +97,16 @@ func isBigInt(typ types.Type) bool { return name.Pkg().Path() == "math/big" && name.Name() == "Int" } +// isUint256 checks 
whether 'typ' is "github.com/holiman/uint256".Int. +func isUint256(typ types.Type) bool { + named, ok := typ.(*types.Named) + if !ok { + return false + } + name := named.Obj() + return name.Pkg().Path() == "github.com/holiman/uint256" && name.Name() == "Int" +} + // isByte checks whether the underlying type of 'typ' is uint8. func isByte(typ types.Type) bool { basic, ok := resolveUnderlying(typ).(*types.Basic) From ee8e83fa5f6cb261dad2ed0a7bbcde4930c41e6c Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Fri, 17 Mar 2023 11:06:06 -0700 Subject: [PATCH 19/24] eth: fix output file permissions in admin_exportChain (#26912) * api: Use 0700 file permissions for ExportChain * change perm to 0644 * Update api.go --------- Co-authored-by: Felix Lange --- eth/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/api.go b/eth/api.go index cd6510ccf9..66df927ac6 100644 --- a/eth/api.go +++ b/eth/api.go @@ -161,7 +161,7 @@ func (api *AdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool return false, errors.New("location would overwrite an existing file") } // Make sure we can create the file to export into - out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) + out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { return false, err } From 81b0aa0cc705e5583ba3f03ff7f3dba5965f7d49 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Mon, 20 Mar 2023 09:09:35 +0100 Subject: [PATCH 20/24] trie: reduce unit test time (#26918) --- trie/proof_test.go | 25 +++++++++++++++++++------ trie/sync_test.go | 1 + trie/trie_test.go | 6 +++--- 3 files changed, 23 insertions(+), 9 deletions(-) diff --git a/trie/proof_test.go b/trie/proof_test.go index 5796a30893..6b23bcdb27 100644 --- a/trie/proof_test.go +++ b/trie/proof_test.go @@ -20,6 +20,7 @@ import ( "bytes" crand "crypto/rand" "encoding/binary" + "fmt" mrand "math/rand" "sort" "testing" @@ -30,6 +31,24 @@ import ( "github.com/ethereum/go-ethereum/ethdb/memorydb" ) +// Prng is a pseudo random number generator seeded by strong randomness. +// The randomness is printed on startup in order to make failures reproducible. +var prng = initRnd() + +func initRnd() *mrand.Rand { + var seed [8]byte + crand.Read(seed[:]) + rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:])))) + fmt.Printf("Seed: %x\n", seed) + return rnd +} + +func randBytes(n int) []byte { + r := make([]byte, n) + prng.Read(r) + return r +} + // makeProvers creates Merkle trie provers based on different implementations to // test all variations. func makeProvers(trie *Trie) []func(key []byte) *memorydb.Database { @@ -1041,12 +1060,6 @@ func randomTrie(n int) (*Trie, map[string]*kv) { return trie, vals } -func randBytes(n int) []byte { - r := make([]byte, n) - crand.Read(r) - return r -} - func nonRandomTrie(n int) (*Trie, map[string]*kv) { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) vals := make(map[string]*kv) diff --git a/trie/sync_test.go b/trie/sync_test.go index fc871a22c8..8fec378333 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -434,6 +434,7 @@ func TestDuplicateAvoidanceSync(t *testing.T) { // Tests that at any point in time during a sync, only complete sub-tries are in // the database. 
From 81b0aa0cc705e5583ba3f03ff7f3dba5965f7d49 Mon Sep 17 00:00:00 2001
From: Marius van der Wijden
Date: Mon, 20 Mar 2023 09:09:35 +0100
Subject: [PATCH 20/24] trie: reduce unit test time (#26918)

---
 trie/proof_test.go | 25 +++++++++++++++++++------
 trie/sync_test.go  |  1 +
 trie/trie_test.go  |  6 +++---
 3 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/trie/proof_test.go b/trie/proof_test.go
index 5796a30893..6b23bcdb27 100644
--- a/trie/proof_test.go
+++ b/trie/proof_test.go
@@ -20,6 +20,7 @@ import (
 	"bytes"
 	crand "crypto/rand"
 	"encoding/binary"
+	"fmt"
 	mrand "math/rand"
 	"sort"
 	"testing"
@@ -30,6 +31,24 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
 )
 
+// Prng is a pseudo random number generator seeded by strong randomness.
+// The randomness is printed on startup in order to make failures reproducible.
+var prng = initRnd()
+
+func initRnd() *mrand.Rand {
+	var seed [8]byte
+	crand.Read(seed[:])
+	rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:]))))
+	fmt.Printf("Seed: %x\n", seed)
+	return rnd
+}
+
+func randBytes(n int) []byte {
+	r := make([]byte, n)
+	prng.Read(r)
+	return r
+}
+
 // makeProvers creates Merkle trie provers based on different implementations to
 // test all variations.
 func makeProvers(trie *Trie) []func(key []byte) *memorydb.Database {
@@ -1041,12 +1060,6 @@ func randomTrie(n int) (*Trie, map[string]*kv) {
 	return trie, vals
 }
 
-func randBytes(n int) []byte {
-	r := make([]byte, n)
-	crand.Read(r)
-	return r
-}
-
 func nonRandomTrie(n int) (*Trie, map[string]*kv) {
 	trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
 	vals := make(map[string]*kv)
diff --git a/trie/sync_test.go b/trie/sync_test.go
index fc871a22c8..8fec378333 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -434,6 +434,7 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
 
 // Tests that at any point in time during a sync, only complete sub-tries are in
 // the database.
 func TestIncompleteSync(t *testing.T) {
+	t.Parallel()
 
 	// Create a random trie to copy
 	srcDb, srcTrie, _ := makeTestTrie()
diff --git a/trie/trie_test.go b/trie/trie_test.go
index e415937073..089bb44a97 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -18,7 +18,6 @@ package trie
 
 import (
 	"bytes"
-	crand "crypto/rand"
 	"encoding/binary"
 	"errors"
 	"fmt"
@@ -1146,13 +1145,14 @@ func deleteString(trie *Trie, k string) {
 
 func TestDecodeNode(t *testing.T) {
 	t.Parallel()
+
 	var (
 		hash  = make([]byte, 20)
 		elems = make([]byte, 20)
 	)
 	for i := 0; i < 5000000; i++ {
-		crand.Read(hash)
-		crand.Read(elems)
+		prng.Read(hash)
+		prng.Read(elems)
 		decodeNode(hash, elems)
 	}
 }
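The trick that makes patch 20's speedup safe: crypto/rand is slow and non-reproducible, while a math/rand generator seeded once from crypto/rand is fast, and printing the seed lets a failing run be replayed. A condensed sketch of the same pattern; the explicit seed parameter is a hypothetical addition for illustration, not part of the patch:

package main

import (
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
)

// newSeededRand seeds a fast deterministic generator from 8 bytes of
// strong randomness and prints the seed, so the exact byte stream of a
// failing run can be reproduced by passing the printed value back in.
func newSeededRand(seed uint64) *mrand.Rand {
	if seed == 0 {
		var buf [8]byte
		crand.Read(buf[:])
		seed = binary.LittleEndian.Uint64(buf[:])
	}
	fmt.Printf("Seed: %x\n", seed)
	return mrand.New(mrand.NewSource(int64(seed)))
}

func main() {
	prng := newSeededRand(0) // pass the printed seed to replay a failure
	buf := make([]byte, 8)
	prng.Read(buf) // fast, deterministic given the seed
	fmt.Printf("random bytes: %x\n", buf)
}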
From 80ff0b4e6a1b3059195ad7949a2e5dbf12065592 Mon Sep 17 00:00:00 2001
From: s7v7nislands
Date: Mon, 20 Mar 2023 16:12:24 +0800
Subject: [PATCH 21/24] core/txpool: use atomic int added in go1.19 (#26913)

Makes use of the atomic.Int64 and atomic.Uint64 types added in Go 1.19
instead of atomic function calls on plain integer fields.
---
 core/txpool/list.go         | 15 +++++------
 core/txpool/txpool.go       |  3 +--
 core/txpool/txpool2_test.go |  6 ++---
 core/txpool/txpool_test.go  | 52 +++++++++++++++++++++----------------
 4 files changed, 39 insertions(+), 37 deletions(-)

diff --git a/core/txpool/list.go b/core/txpool/list.go
index 724bb6caca..639d69bcbe 100644
--- a/core/txpool/list.go
+++ b/core/txpool/list.go
@@ -508,10 +508,7 @@ func (h *priceHeap) Pop() interface{} {
 // the floating heap is better. When baseFee is decreasing they behave similarly.
 type pricedList struct {
 	// Number of stale price points to (re-heap trigger).
-	// This field is accessed atomically, and must be the first field
-	// to ensure it has correct alignment for atomic.AddInt64.
-	// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
-	stales int64
+	stales atomic.Int64
 
 	all              *lookup    // Pointer to the map of all transactions
 	urgent, floating priceHeap  // Heaps of prices of all the stored **remote** transactions
@@ -545,7 +542,7 @@ func (l *pricedList) Put(tx *types.Transaction, local bool) {
 // the heap if a large enough ratio of transactions go stale.
 func (l *pricedList) Removed(count int) {
 	// Bump the stale counter, but exit if still too low (< 25%)
-	stales := atomic.AddInt64(&l.stales, int64(count))
+	stales := l.stales.Add(int64(count))
 	if int(stales) <= (len(l.urgent.list)+len(l.floating.list))/4 {
 		return
 	}
@@ -570,7 +567,7 @@ func (l *pricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool {
 	for len(h.list) > 0 {
 		head := h.list[0]
 		if l.all.GetRemote(head.Hash()) == nil { // Removed or migrated
-			atomic.AddInt64(&l.stales, -1)
+			l.stales.Add(-1)
 			heap.Pop(h)
 			continue
 		}
@@ -597,7 +594,7 @@ func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) {
 			// Discard stale transactions if found during cleanup
 			tx := heap.Pop(&l.urgent).(*types.Transaction)
 			if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated
-				atomic.AddInt64(&l.stales, -1)
+				l.stales.Add(-1)
 				continue
 			}
 			// Non stale transaction found, move to floating heap
@@ -610,7 +607,7 @@ func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) {
 			// Discard stale transactions if found during cleanup
 			tx := heap.Pop(&l.floating).(*types.Transaction)
 			if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated
-				atomic.AddInt64(&l.stales, -1)
+				l.stales.Add(-1)
 				continue
 			}
 			// Non stale transaction found, discard it
@@ -633,7 +630,7 @@ func (l *pricedList) Reheap() {
 	l.reheapMu.Lock()
 	defer l.reheapMu.Unlock()
 	start := time.Now()
-	atomic.StoreInt64(&l.stales, 0)
+	l.stales.Store(0)
 	l.urgent.list = make([]*types.Transaction, 0, l.all.RemoteCount())
 	l.all.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
 		l.urgent.list = append(l.urgent.list, tx)
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index ad556f39fb..cc673ccc10 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -23,7 +23,6 @@ import (
 	"math/big"
 	"sort"
 	"sync"
-	"sync/atomic"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -383,7 +382,7 @@ func (pool *TxPool) loop() {
 			pool.mu.RLock()
 			pending, queued := pool.stats()
 			pool.mu.RUnlock()
-			stales := int(atomic.LoadInt64(&pool.priced.stales))
+			stales := int(pool.priced.stales.Load())
 
 			if pending != prevPending || queued != prevQueued || stales != prevStales {
 				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
diff --git a/core/txpool/txpool2_test.go b/core/txpool/txpool2_test.go
index 20d6dd713a..6d84975d83 100644
--- a/core/txpool/txpool2_test.go
+++ b/core/txpool/txpool2_test.go
@@ -79,7 +79,7 @@ func TestTransactionFutureAttack(t *testing.T) {
 
 	// Create the pool to test the limit enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 	config := testTxPoolConfig
 	config.GlobalQueue = 100
 	config.GlobalSlots = 100
@@ -115,7 +115,7 @@ func TestTransactionFuture1559(t *testing.T) {
 	t.Parallel()
 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 	pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
 	defer pool.Stop()
@@ -147,7 +147,7 @@ func TestTransactionZAttack(t *testing.T) {
 	t.Parallel()
 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 	pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
 	defer pool.Stop()
 	// Create a number of test accounts, fund them and make transactions
diff --git a/core/txpool/txpool_test.go b/core/txpool/txpool_test.go
index c4c62db2d6..7771c5f7cd 100644
--- a/core/txpool/txpool_test.go
+++ b/core/txpool/txpool_test.go
@@ -59,15 +59,21 @@ func init() {
 	}
 }
 
 type testBlockChain struct {
-	gasLimit      uint64 // must be first field for 64 bit alignment (atomic access)
+	gasLimit      atomic.Uint64
 	statedb       *state.StateDB
 	chainHeadFeed *event.Feed
 }
 
+func newTestBlockChain(gasLimit uint64, statedb *state.StateDB, chainHeadFeed *event.Feed) *testBlockChain {
+	bc := testBlockChain{statedb: statedb, chainHeadFeed: new(event.Feed)}
+	bc.gasLimit.Store(gasLimit)
+	return &bc
+}
+
 func (bc *testBlockChain) CurrentBlock() *types.Header {
 	return &types.Header{
 		Number:   new(big.Int),
-		GasLimit: atomic.LoadUint64(&bc.gasLimit),
+		GasLimit: bc.gasLimit.Load(),
 	}
 }
 
@@ -121,7 +127,7 @@ func setupPool() (*TxPool, *ecdsa.PrivateKey) {
 
 func setupPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) {
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{10000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(10000000, statedb, new(event.Feed))
 
 	key, _ := crypto.GenerateKey()
 	pool := NewTxPool(testTxPoolConfig, config, blockchain)
@@ -236,7 +242,7 @@ func TestStateChangeDuringReset(t *testing.T) {
 
 	// setup pool with 2 transaction in it
 	statedb.SetBalance(address, new(big.Int).SetUint64(params.Ether))
-	blockchain := &testChain{&testBlockChain{1000000000, statedb, new(event.Feed)}, address, &trigger}
+	blockchain := &testChain{newTestBlockChain(1000000000, statedb, new(event.Feed)), address, &trigger}
 
 	tx0 := transaction(0, 100000, key)
 	tx1 := transaction(1, 100000, key)
@@ -430,7 +436,7 @@ func TestChainFork(t *testing.T) {
 		statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
 		statedb.AddBalance(addr, big.NewInt(100000000000000))
 
		pool.chain = newTestBlockChain(1000000, statedb, new(event.Feed))
 		<-pool.requestReset(nil, nil)
 	}
 	resetState()
@@ -459,7 +465,7 @@ func TestDoubleNonce(t *testing.T) {
 		statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
 		statedb.AddBalance(addr, big.NewInt(100000000000000))
 
-		pool.chain = &testBlockChain{1000000, statedb, new(event.Feed)}
+		pool.chain = newTestBlockChain(1000000, statedb, new(event.Feed))
 		<-pool.requestReset(nil, nil)
 	}
 	resetState()
@@ -629,7 +635,7 @@ func TestDropping(t *testing.T) {
 		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4)
 	}
 	// Reduce the block gas limit, check that invalidated transactions are dropped
-	atomic.StoreUint64(&pool.chain.(*testBlockChain).gasLimit, 100)
+	pool.chain.(*testBlockChain).gasLimit.Store(100)
 	<-pool.requestReset(nil, nil)
 
 	if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
@@ -657,7 +663,7 @@ func TestPostponing(t *testing.T) {
 
 	// Create the pool to test the postponing with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
 	defer pool.Stop()
@@ -869,7 +875,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
 
 	// Create the pool to test the limit enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	config := testTxPoolConfig
 	config.NoLocals = nolocals
@@ -961,7 +967,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
 	// Create the pool to test the non-expiration enforcement
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	config := testTxPoolConfig
 	config.Lifetime = time.Second
@@ -1146,7 +1152,7 @@ func TestPendingGlobalLimiting(t *testing.T) {
 
 	// Create the pool to test the limit enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	config := testTxPoolConfig
 	config.GlobalSlots = config.AccountSlots * 10
@@ -1248,7 +1254,7 @@ func TestCapClearsFromAll(t *testing.T) {
 
 	// Create the pool to test the limit enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	config := testTxPoolConfig
 	config.AccountSlots = 2
@@ -1282,7 +1288,7 @@ func TestPendingMinimumAllowance(t *testing.T) {
 
 	// Create the pool to test the limit enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	config := testTxPoolConfig
 	config.GlobalSlots = 1
@@ -1330,7 +1336,7 @@ func TestRepricing(t *testing.T) {
 
 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
 	defer pool.Stop()
@@ -1578,7 +1584,7 @@ func TestRepricingKeepsLocals(t *testing.T) {
 
 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
 	defer pool.Stop()
@@ -1651,7 +1657,7 @@ func TestUnderpricing(t *testing.T) {
 
 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	config := testTxPoolConfig
 	config.GlobalSlots = 2
@@ -1765,7 +1771,7 @@ func TestStableUnderpricing(t *testing.T) {
 
 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	config := testTxPoolConfig
 	config.GlobalSlots = 128
@@ -1997,7 +2003,7 @@ func TestDeduplication(t *testing.T) {
 
 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
 	defer pool.Stop()
@@ -2063,7 +2069,7 @@ func TestReplacement(t *testing.T) {
 
 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
 	defer pool.Stop()
@@ -2268,7 +2274,7 @@ func testJournaling(t *testing.T, nolocals bool) {
 
 	// Create the original pool to inject transaction into the journal
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	config := testTxPoolConfig
 	config.NoLocals = nolocals
@@ -2310,7 +2316,7 @@ func testJournaling(t *testing.T, nolocals bool) {
 	// Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive
 	pool.Stop()
 	statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
-	blockchain = &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain = newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	pool = NewTxPool(config, params.TestChainConfig, blockchain)
 
@@ -2337,7 +2343,7 @@ func testJournaling(t *testing.T, nolocals bool) {
 	pool.Stop()
 	statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
 
-	blockchain = &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain = newTestBlockChain(1000000, statedb, new(event.Feed))
 	pool = NewTxPool(config, params.TestChainConfig, blockchain)
 
 	pending, queued = pool.Stats()
@@ -2366,7 +2372,7 @@ func TestStatusCheck(t *testing.T) {
 
 	// Create the pool to test the status retrievals with
 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 
 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
 	defer pool.Stop()
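For background on patch 21: before Go 1.19, a plain int64 or uint64 used with the sync/atomic functions had to be 64-bit aligned, which on 32-bit platforms effectively forced such fields to the front of the struct (the pkg-note-BUG referenced in the deleted comment). The atomic.Int64 and atomic.Uint64 wrapper types carry that alignment guarantee themselves and cannot be read or written non-atomically by accident. A minimal sketch with a hypothetical counter type, not pool code:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// With the wrapper type, field order no longer matters for alignment,
// and every access necessarily goes through the atomic methods.
type counter struct {
	name   string
	stales atomic.Int64 // safe anywhere in the struct
}

func main() {
	var c counter
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.stales.Add(1) // concurrent increments, no data race
		}()
	}
	wg.Wait()
	fmt.Println(c.stales.Load()) // 100
}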
From 5d23d21fffa8d11e599d8e482323c321a88658f4 Mon Sep 17 00:00:00 2001
From: Marius van der Wijden
Date: Mon, 20 Mar 2023 12:38:34 +0100
Subject: [PATCH 22/24] params: schedule shanghai fork on mainnet (#26908)

Schedules the Shanghai hardfork at timestamp 1681338455, as discussed on
ACDE 157: https://github.com/ethereum/execution-specs/pull/727
---
 core/forkid/forkid_test.go | 185 +++++++++++++++----------------
 params/config.go           |   1 +
 2 files changed, 74 insertions(+), 112 deletions(-)

diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go
index 4dda280e71..ab59a454d8 100644
--- a/core/forkid/forkid_test.go
+++ b/core/forkid/forkid_test.go
@@ -26,15 +26,9 @@ import (
 	"github.com/ethereum/go-ethereum/rlp"
 )
 
-func u64(val uint64) *uint64 { return &val }
-
 // TestCreation tests that different genesis and fork rule combinations result in
 // the correct fork ID.
 func TestCreation(t *testing.T) {
-	// Temporary non-existent scenario TODO(karalabe): delete when Shanghai is enabled
-	timestampedConfig := *params.MainnetChainConfig
-	timestampedConfig.ShanghaiTime = u64(1668000000)
-
 	type testcase struct {
 		head uint64
 		time uint64
@@ -50,32 +44,34 @@ func TestCreation(t *testing.T) {
 			params.MainnetChainConfig,
 			params.MainnetGenesisHash,
 			[]testcase{
-				{0, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}},         // Unsynced
-				{1149999, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}},   // Last Frontier block
-				{1150000, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}},   // First Homestead block
-				{1919999, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}},   // Last Homestead block
-				{1920000, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}},   // First DAO block
-				{2462999, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}},   // Last DAO block
-				{2463000, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}},   // First Tangerine block
-				{2674999, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}},   // Last Tangerine block
-				{2675000, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}},   // First Spurious block
-				{4369999, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}},   // Last Spurious block
-				{4370000, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}},   // First Byzantium block
-				{7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}},   // Last Byzantium block
-				{7280000, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}},   // First and last Constantinople, first Petersburg block
-				{9068999, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}},   // Last Petersburg block
-				{9069000, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}},   // First Istanbul and first Muir Glacier block
-				{9199999, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}},   // Last Istanbul and first Muir Glacier block
-				{9200000, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}},  // First Muir Glacier block
-				{12243999, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
-				{12244000, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
-				{12964999, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
-				{12965000, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
-				{13772999, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
-				{13773000, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // First Arrow Glacier block
-				{15049999, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // Last Arrow Glacier block
-				{15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}},        // First Gray Glacier block
-				{20000000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}},        // Future Gray Glacier block
+				{0, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}},         // Unsynced
+				{1149999, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}},   // Last Frontier block
+				{1150000, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}},   // First Homestead block
+				{1919999, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}},   // Last Homestead block
+				{1920000, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}},   // First DAO block
+				{2462999, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}},   // Last DAO block
+				{2463000, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}},   // First Tangerine block
+				{2674999, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}},   // Last Tangerine block
+				{2675000, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}},   // First Spurious block
+				{4369999, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}},   // Last Spurious block
+				{4370000, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}},   // First Byzantium block
+				{7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}},   // Last Byzantium block
+				{7280000, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}},   // First and last Constantinople, first Petersburg block
+				{9068999, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}},   // Last Petersburg block
+				{9069000, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}},   // First Istanbul and first Muir Glacier block
+				{9199999, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}},   // Last Istanbul and first Muir Glacier block
+				{9200000, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}},  // First Muir Glacier block
+				{12243999, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
+				{12244000, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
+				{12964999, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
+				{12965000, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
+				{13772999, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
+				{13773000, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // First Arrow Glacier block
+				{15049999, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // Last Arrow Glacier block
+				{15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}},          // First Gray Glacier block
+				{20000000, 1681338454, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // Last Gray Glacier block
+				{20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}},          // First Shanghai block
+				{30000000, 2000000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}},          // Future Shanghai block
 			},
 		},
 		// Rinkeby test cases
@@ -131,41 +127,6 @@ func TestCreation(t *testing.T) {
 				{1735372, 1677557088, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 0}}, // First Shanghai block
 			},
 		},
-		// Temporary timestamped test cases
-		{
-			&timestampedConfig,
-			params.MainnetGenesisHash,
-			[]testcase{
-				{0, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}},         // Unsynced
-				{1149999, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}},   // Last Frontier block
-				{1150000, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}},   // First Homestead block
-				{1919999, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}},   // Last Homestead block
-				{1920000, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}},   // First DAO block
-				{2462999, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}},   // Last DAO block
-				{2463000, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}},   // First Tangerine block
-				{2674999, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}},   // Last Tangerine block
-				{2675000, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}},   // First Spurious block
-				{4369999, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}},   // Last Spurious block
-				{4370000, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}},   // First Byzantium block
-				{7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}},   // Last Byzantium block
-				{7280000, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}},   // First and last Constantinople, first Petersburg block
-				{9068999, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}},   // Last Petersburg block
-				{9069000, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}},   // First Istanbul and first Muir Glacier block
-				{9199999, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}},   // Last Istanbul and first Muir Glacier block
-				{9200000, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}},  // First Muir Glacier block
-				{12243999, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
-				{12244000, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
-				{12964999, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
-				{12965000, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
-				{13772999, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
-				{13773000, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // First Arrow Glacier block
-				{15049999, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // Last Arrow Glacier block
-				{15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1668000000}},          // First Gray Glacier block
-				{19999999, 1667999999, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1668000000}}, // Last Gray Glacier block
-				{20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}},          // First Shanghai block
-				{20000000, 2668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}},          // Future Shanghai block
-			},
-		},
 	}
 	for i, tt := range tests {
 		for j, ttt := range tt.cases {
@@ -179,9 +140,9 @@
 
 // TestValidation tests that a local peer correctly validates and accepts a remote
 // fork ID.
 func TestValidation(t *testing.T) {
-	// Temporary non-existent scenario TODO(karalabe): delete when Shanghai is enabled
-	timestampedConfig := *params.MainnetChainConfig
-	timestampedConfig.ShanghaiTime = u64(1668000000)
+	// Config that has not timestamp enabled
+	legacyConfig := *params.MainnetChainConfig
+	legacyConfig.ShanghaiTime = nil
 
 	tests := []struct {
 		config *params.ChainConfig
@@ -195,60 +156,60 @@ func TestValidation(t *testing.T) {
 		//------------------
 
 		// Local is mainnet Gray Glacier, remote announces the same. No future fork is announced.
-		{params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, nil},
+		{&legacyConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, nil},
 
 		// Local is mainnet Gray Glacier, remote announces the same. Remote also announces a next fork
 		// at block 0xffffffff, but that is uncertain.
-		{params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: math.MaxUint64}, nil},
+		{&legacyConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: math.MaxUint64}, nil},
 
 		// Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
 		// also Byzantium, but it's not yet aware of Petersburg (e.g. non updated node before the fork).
 		// In this case we don't know if Petersburg passed yet or not.
-		{params.MainnetChainConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},
+		{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},
 
 		// Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
 		// also Byzantium, and it's also aware of Petersburg (e.g. updated node before the fork). We
 		// don't know if Petersburg passed yet (will pass) or not.
-		{params.MainnetChainConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
+		{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
 
 		// Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
 		// also Byzantium, and it's also aware of some random fork (e.g. misconfigured Petersburg). As
 		// neither forks passed at neither nodes, they may mismatch, but we still connect for now.
-		{params.MainnetChainConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: math.MaxUint64}, nil},
+		{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: math.MaxUint64}, nil},
 
 		// Local is mainnet exactly on Petersburg, remote announces Byzantium + knowledge about Petersburg. Remote
 		// is simply out of sync, accept.
-		{params.MainnetChainConfig, 7280000, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
+		{&legacyConfig, 7280000, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
 
 		// Local is mainnet Petersburg, remote announces Byzantium + knowledge about Petersburg. Remote
 		// is simply out of sync, accept.
-		{params.MainnetChainConfig, 7987396, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
+		{&legacyConfig, 7987396, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
 
 		// Local is mainnet Petersburg, remote announces Spurious + knowledge about Byzantium. Remote
 		// is definitely out of sync. It may or may not need the Petersburg update, we don't know yet.
-		{params.MainnetChainConfig, 7987396, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},
+		{&legacyConfig, 7987396, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},
 
 		// Local is mainnet Byzantium, remote announces Petersburg. Local is out of sync, accept.
-		{params.MainnetChainConfig, 7279999, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 0}, nil},
+		{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 0}, nil},
 
 		// Local is mainnet Spurious, remote announces Byzantium, but is not aware of Petersburg. Local
 		// out of sync. Local also knows about a future fork, but that is uncertain yet.
-		{params.MainnetChainConfig, 4369999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},
+		{&legacyConfig, 4369999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},
 
 		// Local is mainnet Petersburg. remote announces Byzantium but is not aware of further forks.
 		// Remote needs software update.
-		{params.MainnetChainConfig, 7987396, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, ErrRemoteStale},
+		{&legacyConfig, 7987396, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, ErrRemoteStale},
 
 		// Local is mainnet Petersburg, and isn't aware of more forks. Remote announces Petersburg +
 		// 0xffffffff. Local needs software update, reject.
-		{params.MainnetChainConfig, 7987396, 0, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},
+		{&legacyConfig, 7987396, 0, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},
 
 		// Local is mainnet Byzantium, and is aware of Petersburg. Remote announces Petersburg +
 		// 0xffffffff. Local needs software update, reject.
-		{params.MainnetChainConfig, 7279999, 0, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},
+		{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},
 
 		// Local is mainnet Petersburg, remote is Rinkeby Petersburg.
-		{params.MainnetChainConfig, 7987396, 0, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},
+		{&legacyConfig, 7987396, 0, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},
 
 		// Local is mainnet Gray Glacier, far in the future. Remote announces Gopherium (non existing fork)
 		// at some future block 88888888, for itself, but past block for local. Local is incompatible.
@@ -256,13 +217,13 @@
 		// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
 		//
 		// TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
-		{params.MainnetChainConfig, 88888888, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 88888888}, ErrLocalIncompatibleOrStale},
+		{&legacyConfig, 88888888, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 88888888}, ErrLocalIncompatibleOrStale},
 
 		// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
 		// fork) at block 7279999, before Petersburg. Local is incompatible.
 		//
 		// TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
-		{params.MainnetChainConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
+		{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
 
 		//------------------------------------
 		// Block to timestamp transition tests
 		//------------------------------------
 
 		// Local is mainnet currently in Gray Glacier only (so it's aware of Shanghai), remote announces
 		// also Gray Glacier, but it's not yet aware of Shanghai (e.g. non updated node before the fork).
 		// In this case we don't know if Shanghai passed yet or not.
-		{&timestampedConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, nil},
+		{params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, nil},
 
 		// Local is mainnet currently in Gray Glacier only (so it's aware of Shanghai), remote announces
 		// also Gray Glacier, and it's also aware of Shanghai (e.g. updated node before the fork). We
 		// don't know if Shanghai passed yet (will pass) or not.
-		{&timestampedConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1668000000}, nil},
+		{params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}, nil},
 
 		// Local is mainnet currently in Gray Glacier only (so it's aware of Shanghai), remote announces
 		// also Gray Glacier, and it's also aware of some random fork (e.g. misconfigured Shanghai). As
 		// neither forks passed at neither nodes, they may mismatch, but we still connect for now.
-		{&timestampedConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: math.MaxUint64}, nil},
+		{params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: math.MaxUint64}, nil},
 
 		// Local is mainnet exactly on Shanghai, remote announces Gray Glacier + knowledge about Shanghai. Remote
 		// is simply out of sync, accept.
-		{&timestampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1668000000}, nil},
+		{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}, nil},
 
 		// Local is mainnet Shanghai, remote announces Gray Glacier + knowledge about Shanghai. Remote
 		// is simply out of sync, accept.
-		{&timestampedConfig, 20123456, 1668123456, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1668000000}, nil},
+		{params.MainnetChainConfig, 20123456, 1681338456, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}, nil},
 
 		// Local is mainnet Shanghai, remote announces Arrow Glacier + knowledge about Gray Glacier. Remote
 		// is definitely out of sync. It may or may not need the Shanghai update, we don't know yet.
-		{&timestampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}, nil},
+		{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}, nil},
 
 		// Local is mainnet Gray Glacier, remote announces Shanghai. Local is out of sync, accept.
-		{&timestampedConfig, 15050000, 0, ID{Hash: checksumToBytes(0x71147644), Next: 0}, nil},
+		{params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}, nil},
 
 		// Local is mainnet Arrow Glacier, remote announces Gray Glacier, but is not aware of Shanghai. Local
 		// out of sync. Local also knows about a future fork, but that is uncertain yet.
-		{&timestampedConfig, 13773000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, nil},
+		{params.MainnetChainConfig, 13773000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, nil},
 
 		// Local is mainnet Shanghai. remote announces Gray Glacier but is not aware of further forks.
 		// Remote needs software update.
-		{&timestampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, ErrRemoteStale},
+		{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, ErrRemoteStale},
 
 		// Local is mainnet Gray Glacier, and isn't aware of more forks. Remote announces Gray Glacier +
 		// 0xffffffff. Local needs software update, reject.
@@ -312,7 +273,7 @@
 		// Local is mainnet Gray Glacier, and is aware of Shanghai. Remote announces Shanghai +
 		// 0xffffffff. Local needs software update, reject.
-		{&timestampedConfig, 15050000, 0, ID{Hash: checksumToBytes(checksumUpdate(0x71147644, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
+		{params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(checksumUpdate(0xdce96c2d, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
 
 		// Local is mainnet Gray Glacier, far in the future. Remote announces Gopherium (non existing fork)
 		// at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible.
@@ -322,92 +283,92 @@
 		// Local is mainnet Gray Glacier. Remote is also in Gray Glacier, but announces Gopherium (non existing
 		// fork) at block 7279999, before Shanghai. Local is incompatible.
-		{&timestampedConfig, 19999999, 1667999999, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1667999999}, ErrLocalIncompatibleOrStale},
+		{params.MainnetChainConfig, 19999999, 1667999999, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1667999999}, ErrLocalIncompatibleOrStale},
 
 		//----------------------
 		// Timestamp based tests
 		//----------------------
 
 		// Local is mainnet Shanghai, remote announces the same. No future fork is announced.
-		{&timestampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, nil},
+		{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}, nil},
 
 		// Local is mainnet Shanghai, remote announces the same. Remote also announces a next fork
 		// at time 0xffffffff, but that is uncertain.
-		{&timestampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: math.MaxUint64}, nil},
+		{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: math.MaxUint64}, nil},
 
 		// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
 		// also Shanghai, but it's not yet aware of Cancun (e.g. non updated node before the fork).
 		// In this case we don't know if Cancun passed yet or not.
 		//
 		// TODO(karalabe): Enable this when Cancun is specced
-		//{&timestampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, nil},
+		//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, nil},
 
 		// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
 		// also Shanghai, and it's also aware of Cancun (e.g. updated node before the fork). We
 		// don't know if Cancun passed yet (will pass) or not.
 		//
 		// TODO(karalabe): Enable this when Cancun is specced and update next timestamp
-		//{&timestampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+		//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
 
 		// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
 		// also Shanghai, and it's also aware of some random fork (e.g. misconfigured Cancun). As
 		// neither forks passed at neither nodes, they may mismatch, but we still connect for now.
 		//
 		// TODO(karalabe): Enable this when Cancun is specced
-		//{&timestampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: math.MaxUint64}, nil},
+		//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: math.MaxUint64}, nil},
 
 		// Local is mainnet exactly on Cancun, remote announces Shanghai + knowledge about Cancun. Remote
 		// is simply out of sync, accept.
 		//
 		// TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
-		// {&timestampedConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+		// {params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
 
 		// Local is mainnet Cancun, remote announces Shanghai + knowledge about Cancun. Remote
 		// is simply out of sync, accept.
 		// TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
-		//{&timestampedConfig, 21123456, 1678123456, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+		//{params.MainnetChainConfig, 21123456, 1678123456, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
 
 		// Local is mainnet Prague, remote announces Shanghai + knowledge about Cancun. Remote
 		// is definitely out of sync. It may or may not need the Prague update, we don't know yet.
 		//
 		// TODO(karalabe): Enable this when Cancun **and** Prague is specced, update all the numbers
-		//{&timestampedConfig, 0, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},
+		//{params.MainnetChainConfig, 0, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},
 
 		// Local is mainnet Shanghai, remote announces Cancun. Local is out of sync, accept.
 		//
 		// TODO(karalabe): Enable this when Cancun is specced, update remote checksum
-		//{&timestampedConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x00000000), Next: 0}, nil},
+		//{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x00000000), Next: 0}, nil},
 
 		// Local is mainnet Shanghai, remote announces Cancun, but is not aware of Prague. Local
 		// out of sync. Local also knows about a future fork, but that is uncertain yet.
 		//
 		// TODO(karalabe): Enable this when Cancun **and** Prague is specced, update remote checksum
-		//{&timestampedConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x00000000), Next: 0}, nil},
+		//{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x00000000), Next: 0}, nil},
 
 		// Local is mainnet Cancun. remote announces Shanghai but is not aware of further forks.
 		// Remote needs software update.
 		//
 		// TODO(karalabe): Enable this when Cancun is specced, update local head and time
-		//{&timestampedConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, ErrRemoteStale},
+		//{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, ErrRemoteStale},
 
 		// Local is mainnet Shanghai, and isn't aware of more forks. Remote announces Shanghai +
 		// 0xffffffff. Local needs software update, reject.
-		{&timestampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x71147644, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
+		{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(checksumUpdate(0xdce96c2d, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
 
 		// Local is mainnet Shanghai, and is aware of Cancun. Remote announces Cancun +
 		// 0xffffffff. Local needs software update, reject.
 		//
 		// TODO(karalabe): Enable this when Cancun is specced, update remote checksum
-		//{&timestampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x00000000, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
+		//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x00000000, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
 
 		// Local is mainnet Shanghai, remote is random Shanghai.
-		{&timestampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x12345678), Next: 0}, ErrLocalIncompatibleOrStale},
+		{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0x12345678), Next: 0}, ErrLocalIncompatibleOrStale},
 
 		// Local is mainnet Shanghai, far in the future. Remote announces Gopherium (non existing fork)
 		// at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible.
 		//
 		// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
-		{&timestampedConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0x71147644), Next: 8888888888}, ErrLocalIncompatibleOrStale},
+		{params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0xdce96c2d), Next: 8888888888}, ErrLocalIncompatibleOrStale},
 
 		// Local is mainnet Shanghai. Remote is also in Shanghai, but announces Gopherium (non existing
 		// fork) at timestamp 1668000000, before Cancun. Local is incompatible.
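The new 0xdce96c2d checksum in these test cases is not arbitrary: per EIP-2124, the fork hash is the CRC32 of the genesis hash followed by each passed fork identifier as an 8-byte big-endian value, with block-number forks and the Shanghai timestamp fed through the same update function. A standalone sketch reproducing mainnet's Shanghai fork hash from the values in the table above, using only the standard library; it assumes checksumUpdate in the test is the plain CRC32 update described by the EIP:

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"hash/crc32"
)

func main() {
	// Mainnet genesis hash.
	genesis, _ := hex.DecodeString("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")

	// Unique mainnet fork activation points (duplicates dropped), then
	// the Shanghai timestamp scheduled by this patch.
	forks := []uint64{
		1150000, 1920000, 2463000, 2675000, 4370000, 7280000, 9069000,
		9200000, 12244000, 12965000, 13773000, 15050000, 1681338455,
	}

	// EIP-2124: FORK_HASH = CRC32(genesis || fork1 || fork2 || ...),
	// each fork encoded as an 8-byte big-endian integer.
	hash := crc32.ChecksumIEEE(genesis)
	for _, fork := range forks {
		var blob [8]byte
		binary.BigEndian.PutUint64(blob[:], fork)
		hash = crc32.Update(hash, crc32.IEEETable, blob[:])
	}
	fmt.Printf("%#x\n", hash) // expected: 0xdce96c2d, matching the tests above
}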
diff --git a/params/config.go b/params/config.go
index adb5f44f09..e04b16673c 100644
--- a/params/config.go
+++ b/params/config.go
@@ -76,6 +76,7 @@ var (
 		GrayGlacierBlock:              big.NewInt(15_050_000),
 		TerminalTotalDifficulty:       MainnetTerminalTotalDifficulty, // 58_750_000_000_000_000_000_000
 		TerminalTotalDifficultyPassed: true,
+		ShanghaiTime:                  newUint64(1681338455),
 		Ethash:                        new(EthashConfig),
 	}

From e6b6a8b738069ad0579f6798ee59fde93ed13b43 Mon Sep 17 00:00:00 2001
From: Marius van der Wijden
Date: Mon, 20 Mar 2023 14:15:18 +0100
Subject: [PATCH 23/24] core/txpool: allow future local transactions (#26930)

Local transactions should not be subject to the "future shouldn't churn
pending txs" rule.
---
 core/txpool/txpool.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index cc673ccc10..ac4486c6cb 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -736,7 +736,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
 	}
 
 	// If the new transaction is a future transaction it should never churn pending transactions
-	if pool.isFuture(from, tx) {
+	if !isLocal && pool.isFuture(from, tx) {
 		var replacesPending bool
 		for _, dropTx := range drop {
 			dropSender, _ := types.Sender(pool.signer, dropTx)

From a38f4108571d1a144dc3cf3faf8990430d109bc4 Mon Sep 17 00:00:00 2001
From: Martin Holst Swende
Date: Tue, 21 Mar 2023 09:03:04 +0100
Subject: [PATCH 24/24] params: go-ethereum v1.11.5 stable

---
 params/version.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/params/version.go b/params/version.go
index 0cede57bbf..2ac4a554b1 100644
--- a/params/version.go
+++ b/params/version.go
@@ -21,10 +21,10 @@ import (
 )
 
 const (
-	VersionMajor = 1          // Major version component of the current release
-	VersionMinor = 11         // Minor version component of the current release
-	VersionPatch = 5          // Patch version component of the current release
-	VersionMeta  = "unstable" // Version metadata to append to the version string
+	VersionMajor = 1        // Major version component of the current release
+	VersionMinor = 11       // Minor version component of the current release
+	VersionPatch = 5        // Patch version component of the current release
+	VersionMeta  = "stable" // Version metadata to append to the version string
 )
 
 // Version holds the textual version string.
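Patch 23 is a one-word change that is easy to misread in the diff: the "a future transaction must never churn pending transactions" rule now applies only to remote transactions, so a locally submitted, nonce-gapped transaction can still enter a full pool. A toy sketch of the resulting decision table, using a hypothetical helper rather than the pool's actual API:

package main

import "fmt"

// mayChurnPending mirrors the guard in TxPool.add after patch 23: a
// future (nonce-gapped) transaction may evict pending transactions only
// if it was submitted locally; remote future transactions may not.
func mayChurnPending(isLocal, isFuture bool) bool {
	return !isFuture || isLocal
}

func main() {
	for _, c := range []struct{ local, future bool }{
		{true, true}, {true, false}, {false, true}, {false, false},
	} {
		fmt.Printf("local=%v future=%v -> may churn pending: %v\n",
			c.local, c.future, mayChurnPending(c.local, c.future))
	}
}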