diff --git a/.travis.yml b/.travis.yml
index 8c0af291a3..29c6d6b521 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -15,7 +15,7 @@ jobs:
       if: type = push
       os: linux
       arch: amd64
-      dist: bionic
+      dist: noble
       go: 1.22.x
       env:
         - docker
@@ -32,7 +32,7 @@ jobs:
       if: type = push
       os: linux
       arch: arm64
-      dist: bionic
+      dist: noble
       go: 1.22.x
      env:
         - docker
@@ -49,21 +49,20 @@ jobs:
     - stage: build
       if: type = push
       os: linux
-      dist: bionic
+      dist: noble
       sudo: required
       go: 1.22.x
       env:
         - azure-linux
       git:
         submodules: false # avoid cloning ethereum/tests
-      addons:
-        apt:
-          packages:
-            - gcc-multilib
       script:
-        # Build for the primary platforms that Trusty can manage
+        # build amd64
        - go run build/ci.go install -dlgo
         - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
+
+        # build 386
+        - sudo -E apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib
         - go run build/ci.go install -dlgo -arch 386
         - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
@@ -100,7 +99,7 @@ jobs:
     - stage: build
       os: linux
       arch: amd64
-      dist: bionic
+      dist: noble
       go: 1.22.x
       script:
         - travis_wait 30 go run build/ci.go test $TEST_PACKAGES
@@ -109,14 +108,14 @@ jobs:
       if: type = pull_request
       os: linux
       arch: arm64
-      dist: bionic
+      dist: noble
       go: 1.21.x
       script:
         - travis_wait 30 go run build/ci.go test $TEST_PACKAGES

     - stage: build
       os: linux
-      dist: bionic
+      dist: noble
       go: 1.21.x
       script:
         - travis_wait 30 go run build/ci.go test $TEST_PACKAGES
@@ -125,21 +124,14 @@ jobs:
     - stage: build
       if: type = cron || (type = push && tag ~= /^v[0-9]/)
       os: linux
-      dist: bionic
+      dist: noble
       go: 1.22.x
       env:
         - ubuntu-ppa
       git:
         submodules: false # avoid cloning ethereum/tests
-      addons:
-        apt:
-          packages:
-            - devscripts
-            - debhelper
-            - dput
-            - fakeroot
-            - python-bzrlib
-            - python-paramiko
+      before_install:
+        - sudo -E apt-get -yq --no-install-suggests --no-install-recommends install devscripts debhelper dput fakeroot python-bzrlib python-paramiko
       script:
         - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
         - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
@@ -148,7 +140,7 @@ jobs:
     - stage: build
       if: type = cron
       os: linux
-      dist: bionic
+      dist: noble
       go: 1.22.x
       env:
         - azure-purge
@@ -161,8 +153,7 @@ jobs:
     - stage: build
       if: type = cron
       os: linux
-      dist: bionic
+      dist: noble
       go: 1.22.x
       script:
         - travis_wait 30 go run build/ci.go test -race $TEST_PACKAGES
-
diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go
index 5c978cf0b4..df3dda60b6 100644
--- a/accounts/keystore/keystore.go
+++ b/accounts/keystore/keystore.go
@@ -312,11 +312,10 @@ func (ks *KeyStore) Unlock(a accounts.Account, passphrase string) error {
 // Lock removes the private key with the given address from memory.
 func (ks *KeyStore) Lock(addr common.Address) error {
 	ks.mu.Lock()
-	if unl, found := ks.unlocked[addr]; found {
-		ks.mu.Unlock()
+	unl, found := ks.unlocked[addr]
+	ks.mu.Unlock()
+	if found {
 		ks.expire(addr, unl, time.Duration(0)*time.Nanosecond)
-	} else {
-		ks.mu.Unlock()
 	}
 	return nil
 }
diff --git a/accounts/scwallet/hub.go b/accounts/scwallet/hub.go
index 5f1f369ca2..1b1899dc8e 100644
--- a/accounts/scwallet/hub.go
+++ b/accounts/scwallet/hub.go
@@ -95,6 +95,7 @@ func (hub *Hub) readPairings() error {
 		}
 		return err
 	}
+	defer pairingFile.Close()

 	pairingData, err := io.ReadAll(pairingFile)
 	if err != nil {
diff --git a/beacon/engine/types.go b/beacon/engine/types.go
index fc77c13af7..a73691ca05 100644
--- a/beacon/engine/types.go
+++ b/beacon/engine/types.go
@@ -250,7 +250,7 @@ func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash,
 		BlobGasUsed:      params.BlobGasUsed,
 		ParentBeaconRoot: beaconRoot,
 	}
-	block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */).WithWithdrawals(params.Withdrawals)
+	block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: params.Withdrawals})
 	if block.Hash() != params.BlockHash {
 		return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
 	}
diff --git a/beacon/types/exec_payload.go b/beacon/types/exec_payload.go
index 718f98f529..4448f854ad 100644
--- a/beacon/types/exec_payload.go
+++ b/beacon/types/exec_payload.go
@@ -63,9 +63,7 @@ func convertPayload[T payloadType](payload T, parentRoot *zrntcommon.Root) (*typ
 		panic("unsupported block type")
 	}

-	block := types.NewBlockWithHeader(&header)
-	block = block.WithBody(transactions, nil)
-	block = block.WithWithdrawals(withdrawals)
+	block := types.NewBlockWithHeader(&header).WithBody(types.Body{Transactions: transactions, Withdrawals: withdrawals})
 	if hash := block.Hash(); hash != expectedHash {
 		return nil, fmt.Errorf("Sanity check failed, payload hash does not match (expected %x, got %x)", expectedHash, hash)
 	}
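Several call sites in this patch migrate from the chained WithBody(txs, uncles) + WithWithdrawals(...) style to a single types.Body value. A hedged sketch of the consolidated construction, with placeholder inputs (header, txs and withdrawals are illustrative, not from this patch):

	// One Body value now carries transactions, uncles and withdrawals together.
	body := types.Body{
		Transactions: txs,         // []*types.Transaction
		Uncles:       nil,         // []*types.Header
		Withdrawals:  withdrawals, // []*types.Withdrawal
	}
	block := types.NewBlockWithHeader(header).WithBody(body)
	// block.Hash() is still derived from the header alone; the call sites
	// above compare it against the expected payload hash after assembly.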
diff --git a/build/checksums.txt b/build/checksums.txt
index 27577285b8..da2988452a 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -5,22 +5,48 @@
 # https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/
 ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c  fixtures_develop.tar.gz

-# version:golang 1.22.2
+# version:golang 1.22.3
 # https://go.dev/dl/
-374ea82b289ec738e968267cac59c7d5ff180f9492250254784b2044e90df5a9  go1.22.2.src.tar.gz
-33e7f63077b1c5bce4f1ecadd4d990cf229667c40bfb00686990c950911b7ab7  go1.22.2.darwin-amd64.tar.gz
-660298be38648723e783ba0398e90431de1cb288c637880cdb124f39bd977f0d  go1.22.2.darwin-arm64.tar.gz
-efc7162b0cad2f918ac566a923d4701feb29dc9c0ab625157d49b1cbcbba39da  go1.22.2.freebsd-386.tar.gz
-d753428296e6709527e291fd204700a587ffef2c0a472b21aebea11618245929  go1.22.2.freebsd-amd64.tar.gz
-586d9eb7fe0489ab297ad80dd06414997df487c5cf536c490ffeaa8d8f1807a7  go1.22.2.linux-386.tar.gz
-5901c52b7a78002aeff14a21f93e0f064f74ce1360fce51c6ee68cd471216a17  go1.22.2.linux-amd64.tar.gz
-36e720b2d564980c162a48c7e97da2e407dfcc4239e1e58d98082dfa2486a0c1  go1.22.2.linux-arm64.tar.gz
-9243dfafde06e1efe24d59df6701818e6786b4adfdf1191098050d6d023c5369  go1.22.2.linux-armv6l.tar.gz
-251a8886c5113be6490bdbb955ddee98763b49c9b1bf4c8364c02d3b482dab00  go1.22.2.linux-ppc64le.tar.gz
-2b39019481c28c560d65e9811a478ae10e3ef765e0f59af362031d386a71bfef  go1.22.2.linux-s390x.tar.gz
-651753c06df037020ef4d162c5b273452e9ba976ed17ae39e66ef7ee89d8147e  go1.22.2.windows-386.zip
-8e581cf330f49d3266e936521a2d8263679ef7e2fc2cbbceb85659122d883596  go1.22.2.windows-amd64.zip
-ddfca5beb9a0c62254266c3090c2555d899bf3e7aa26243e7de3621108f06875  go1.22.2.windows-arm64.zip
+80648ef34f903193d72a59c0dff019f5f98ae0c9aa13ade0b0ecbff991a76f68  go1.22.3.src.tar.gz
+adc9f5fee89cd53d907eb542d3b269d9d8a08a66bf1ab42175450ffbb58733fb  go1.22.3.aix-ppc64.tar.gz
+610e48c1df4d2f852de8bc2e7fd2dc1521aac216f0c0026625db12f67f192024  go1.22.3.darwin-amd64.tar.gz
+02abeab3f4b8981232237ebd88f0a9bad933bc9621791cd7720a9ca29eacbe9d  go1.22.3.darwin-arm64.tar.gz
+a5b3d54905f17af2ceaf7fcfe92edee67a5bd4eccd962dd89df719ace3e0894d  go1.22.3.dragonfly-amd64.tar.gz
+b9989ca87695ae93bacde6f3aa7b13cde5f3825515eb9ed9bbef014273739889  go1.22.3.freebsd-386.tar.gz
+7483961fae29d7d768afd5c9c0f229354ca3263ab7119c20bc182761f87cbc74  go1.22.3.freebsd-amd64.tar.gz
+edf1f0b8ecf68b14faeedb4f5d868a58c4777a0282bd85e5115c39c010cd0130  go1.22.3.freebsd-arm.tar.gz
+572eb70e5e835fbff7d53ebf473f611d7eb458c428f8dbd98a49196883c3309e  go1.22.3.freebsd-arm64.tar.gz
+ef94eb2b74402e436dce970584222c4e454eb3093908591149bd2ded6862b8af  go1.22.3.freebsd-riscv64.tar.gz
+3c3f498c68334cbd11f72aadfb6bcb507eb8436cebc50f437a0523cd4c5e03d1  go1.22.3.illumos-amd64.tar.gz
+fefba30bb0d3dd1909823ee38c9f1930c3dc5337a2ac4701c2277a329a386b57  go1.22.3.linux-386.tar.gz
+8920ea521bad8f6b7bc377b4824982e011c19af27df88a815e3586ea895f1b36  go1.22.3.linux-amd64.tar.gz
+6c33e52a5b26e7aa021b94475587fce80043a727a54ceb0eee2f9fc160646434  go1.22.3.linux-arm64.tar.gz
+f2bacad20cd2b96f23a86d4826525d42b229fd431cc6d0dec61ff3bc448ef46e  go1.22.3.linux-armv6l.tar.gz
+41e9328340544893482b2928ae18a9a88ba18b2fdd29ac77f4d33cf1815bbdc2  go1.22.3.linux-loong64.tar.gz
+cf4d5faff52e642492729eaf396968f43af179518be769075b90bc1bf650abf6  go1.22.3.linux-mips.tar.gz
+3bd009fe2e3d2bfd52433a11cb210d1dfa50b11b4c347a293951efd9e36de945  go1.22.3.linux-mips64.tar.gz
+5913b82a042188ef698f7f2dfd0cd0c71f0508a4739de9e41fceff3f4dc769b4  go1.22.3.linux-mips64le.tar.gz
+441afebca555be5313867b4577f237c7b5c0fff4386e22e47875b9f805abbec5  go1.22.3.linux-mipsle.tar.gz
+f3b53190a76f4a35283501ba6d94cbb72093be0c62ff735c6f9e586a1c983381  go1.22.3.linux-ppc64.tar.gz
+04b7b05283de30dd2da20bf3114b2e22cc727938aed3148babaf35cc951051ac  go1.22.3.linux-ppc64le.tar.gz
+d4992d4a85696e3f1de06cefbfc2fd840c9c6695d77a0f35cfdc4e28b2121c20  go1.22.3.linux-riscv64.tar.gz
+2aba796417a69be5f3ed489076bac79c1c02b36e29422712f9f3bf51da9cf2d4  go1.22.3.linux-s390x.tar.gz
+d6e6113542dd9f23db899e177fe23772bac114a5ea5e8ee436b9da68628335a8  go1.22.3.netbsd-386.tar.gz
+c33cee3075bd18ceefddd75bafa8efb51fbdc17b5ee74275122e7a927a237a4c  go1.22.3.netbsd-amd64.tar.gz
+1ab251df3c85f3b391a09565ca52fb6e1306527d72852d553e9ab74eabb4ecf8  go1.22.3.netbsd-arm.tar.gz
+1d194fe53f5d82f9a612f848950d8af8cab7cb40ccc03f10c4eb1c9808ff1a0c  go1.22.3.netbsd-arm64.tar.gz
+91d6601727f08506e938640885d3ded784925045e3a4444fd9b4b936efe1b1e0  go1.22.3.openbsd-386.tar.gz
+09d0c91ae35a4eea92615426992062ca236cc2f66444fb0b0a24cd3b13bd5297  go1.22.3.openbsd-amd64.tar.gz
+338da30cc2c97b9458e0b4caa2509f67bba55d3de16fb7d31775baca82d2e3dc  go1.22.3.openbsd-arm.tar.gz
+53eadfabd2b7dd09a64941421afee2a2888e2a4f94f353b27919b1dad1171a21  go1.22.3.openbsd-arm64.tar.gz
+8a1a2842ae8dcf2374bb05dff58074b368bb698dc9c211c794c1ff119cd9fdc7  go1.22.3.plan9-386.tar.gz
+f9816d3dd9e730cad55085ea08c1f0c925720728f9c945fff59cd24d2ac2db7b  go1.22.3.plan9-amd64.tar.gz
+f4d3d7b17c9e1b1635fcb287b5b5ab5b60acc9db3ba6a27f2b2f5d6537a2ef95  go1.22.3.plan9-arm.tar.gz
+46b7999ee94d91b21ad6940b5a3131ff6fe53ef97be9a34e582e2a3ad7263e95  go1.22.3.solaris-amd64.tar.gz
+f60f63b8a0885e0d924f39fd284aee5438fe87d8c3d8545a312adf43e0d9edac  go1.22.3.windows-386.zip
+cab2af6951a6e2115824263f6df13ff069c47270f5788714fa1d776f7f60cb39  go1.22.3.windows-amd64.zip
+40b37f4b068fc759f3a0dd61176a0f7570a4ba48bed8561c31d3967a3583981a  go1.22.3.windows-arm.zip
+59b76ee22b9b1c3afbf7f50e3cb4edb954d6c0d25e5e029ab5483a6804d61e71  go1.22.3.windows-arm64.zip

 # version:golangci 1.55.2
 # https://github.com/golangci/golangci-lint/releases/
@@ -56,10 +82,12 @@ a5e68ae73d38748b5269fad36ac7575e3c162a5dc63ef58abdea03cc5da4522a golangci-lint-
 # This is the builder on PPA that will build Go itself (inception-y), don't modify!
 #
 # This version is fine to be old and full of security holes, we just use it
-# to build the latest Go. Don't change it. If it ever becomes insufficient,
-# we need to switch over to a recursive builder to jump across supported
-# versions.
+# to build the latest Go. Don't change it.
 #
-# version:ppa-builder 1.19.6
+# version:ppa-builder-1 1.19.6
 # https://go.dev/dl/
 d7f0013f82e6d7f862cc6cb5c8cdb48eef5f2e239b35baa97e2f1a7466043767  go1.19.6.src.tar.gz
+
+# version:ppa-builder-2 1.21.9
+# https://go.dev/dl/
+58f0c5ced45a0012bce2ff7a9df03e128abcc8818ebabe5027bb92bafe20e421  go1.21.9.src.tar.gz
diff --git a/build/ci.go b/build/ci.go
index 4d8dba6ce2..9a2532f51f 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -117,23 +117,15 @@ var (
 		debEthereum,
 	}

-	// Distros for which packages are created.
-	// Note: vivid is unsupported because there is no golang-1.6 package for it.
-	// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
-	//   wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite, impish,
-	//   kinetic, lunar
-	debDistroGoBoots = map[string]string{
-		"trusty": "golang-1.11", // 14.04, EOL: 04/2024
-		"xenial": "golang-go",   // 16.04, EOL: 04/2026
-		"bionic": "golang-go",   // 18.04, EOL: 04/2028
-		"focal":  "golang-go",   // 20.04, EOL: 04/2030
-		"jammy":  "golang-go",   // 22.04, EOL: 04/2032
-		"mantic": "golang-go",   // 23.10, EOL: 07/2024
-	}
+	// Distros for which packages are created
+	debDistros = []string{
+		"xenial", // 16.04, EOL: 04/2026
+		"bionic", // 18.04, EOL: 04/2028
+		"focal",  // 20.04, EOL: 04/2030
+		"jammy",  // 22.04, EOL: 04/2032
+		"noble",  // 24.04, EOL: 04/2034

-	debGoBootPaths = map[string]string{
-		"golang-1.11": "/usr/lib/go-1.11",
-		"golang-go":   "/usr/lib/go",
+		"mantic", // 23.10, EOL: 07/2024
 	}

 	// This is where the tests should be unpacked.
@@ -694,8 +686,8 @@ func doDebianSource(cmdline []string) {
 	}
 	// Download and verify the Go source packages.
 	var (
-		gobootbundle = downloadGoBootstrapSources(*cachedir)
-		gobundle     = downloadGoSources(*cachedir)
+		gobootbundles = downloadGoBootstrapSources(*cachedir)
+		gobundle      = downloadGoSources(*cachedir)
 	)
 	// Download all the dependencies needed to build the sources and run the ci script
 	srcdepfetch := tc.Go("mod", "download")
@@ -708,17 +700,19 @@ func doDebianSource(cmdline []string) {

 	// Create Debian packages and upload them.
 	for _, pkg := range debPackages {
-		for distro, goboot := range debDistroGoBoots {
+		for _, distro := range debDistros {
 			// Prepare the debian package with the go-ethereum sources.
-			meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
+			meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
 			pkgdir := stageDebianSource(*workdir, meta)

 			// Add bootstrapper Go source code
-			if err := build.ExtractArchive(gobootbundle, pkgdir); err != nil {
-				log.Fatalf("Failed to extract bootstrapper Go sources: %v", err)
-			}
-			if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".goboot")); err != nil {
-				log.Fatalf("Failed to rename bootstrapper Go source folder: %v", err)
+			for i, gobootbundle := range gobootbundles {
+				if err := build.ExtractArchive(gobootbundle, pkgdir); err != nil {
+					log.Fatalf("Failed to extract bootstrapper Go sources: %v", err)
+				}
+				if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, fmt.Sprintf(".goboot-%d", i+1))); err != nil {
+					log.Fatalf("Failed to rename bootstrapper Go source folder: %v", err)
+				}
 			}
 			// Add builder Go source code
 			if err := build.ExtractArchive(gobundle, pkgdir); err != nil {
@@ -754,21 +748,26 @@ func doDebianSource(cmdline []string) {
 	}
 }

-// downloadGoBootstrapSources downloads the Go source tarball that will be used
+// downloadGoBootstrapSources downloads the Go source tarball(s) that will be used
 // to bootstrap the builder Go.
-func downloadGoBootstrapSources(cachedir string) string {
+func downloadGoBootstrapSources(cachedir string) []string {
 	csdb := build.MustLoadChecksums("build/checksums.txt")
-	gobootVersion, err := build.Version(csdb, "ppa-builder")
-	if err != nil {
-		log.Fatal(err)
-	}
-	file := fmt.Sprintf("go%s.src.tar.gz", gobootVersion)
-	url := "https://dl.google.com/go/" + file
-	dst := filepath.Join(cachedir, file)
-	if err := csdb.DownloadFile(url, dst); err != nil {
-		log.Fatal(err)
+
+	var bundles []string
+	for _, booter := range []string{"ppa-builder-1", "ppa-builder-2"} {
+		gobootVersion, err := build.Version(csdb, booter)
+		if err != nil {
+			log.Fatal(err)
+		}
+		file := fmt.Sprintf("go%s.src.tar.gz", gobootVersion)
+		url := "https://dl.google.com/go/" + file
+		dst := filepath.Join(cachedir, file)
+		if err := csdb.DownloadFile(url, dst); err != nil {
+			log.Fatal(err)
+		}
+		bundles = append(bundles, dst)
 	}
-	return dst
+	return bundles
 }

 // downloadGoSources downloads the Go source tarball.
@@ -846,10 +845,7 @@ type debPackage struct {
 }

 type debMetadata struct {
-	Env           build.Environment
-	GoBootPackage string
-	GoBootPath    string
-
+	Env         build.Environment
 	PackageName string

 	// go-ethereum version being built. Note that this
@@ -877,21 +873,19 @@ func (d debExecutable) Package() string {
 	return d.BinaryName
 }

-func newDebMetadata(distro, goboot, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
+func newDebMetadata(distro, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
 	if author == "" {
 		// No signing key, use default author.
author = "Ethereum Builds " } return debMetadata{ - GoBootPackage: goboot, - GoBootPath: debGoBootPaths[goboot], - PackageName: name, - Env: env, - Author: author, - Distro: distro, - Version: version, - Time: t.Format(time.RFC1123Z), - Executables: exes, + PackageName: name, + Env: env, + Author: author, + Distro: distro, + Version: version, + Time: t.Format(time.RFC1123Z), + Executables: exes, } } diff --git a/build/deb/ethereum/deb.control b/build/deb/ethereum/deb.control index 3b759f2d04..333e954c17 100644 --- a/build/deb/ethereum/deb.control +++ b/build/deb/ethereum/deb.control @@ -2,7 +2,7 @@ Source: {{.Name}} Section: science Priority: extra Maintainer: {{.Author}} -Build-Depends: debhelper (>= 8.0.0), {{.GoBootPackage}} +Build-Depends: debhelper (>= 8.0.0), golang-go Standards-Version: 3.9.5 Homepage: https://ethereum.org Vcs-Git: https://github.com/ethereum/go-ethereum.git diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules index daca793e55..3287e15ff0 100644 --- a/build/deb/ethereum/deb.rules +++ b/build/deb/ethereum/deb.rules @@ -7,7 +7,7 @@ # Launchpad rejects Go's access to $HOME, use custom folders export GOCACHE=/tmp/go-build export GOPATH=/tmp/gopath -export GOROOT_BOOTSTRAP={{.GoBootPath}} +export GOROOT_BOOTSTRAP=/usr/lib/go override_dh_auto_clean: # Don't try to be smart Launchpad, we know our build rules better than you @@ -19,8 +19,9 @@ override_dh_auto_build: # # We're also shipping the bootstrapper as of Go 1.20 as it had minimum version # requirements opposed to older versions of Go. - (mv .goboot ../ && cd ../.goboot/src && ./make.bash) - (mv .go ../ && cd ../.go/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot ./make.bash) + (mv .goboot-1 ../ && cd ../.goboot-1/src && ./make.bash) + (mv .goboot-2 ../ && cd ../.goboot-2/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot-1 ./make.bash) + (mv .go ../ && cd ../.go/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot-2 ./make.bash) # We can't download external go modules within Launchpad, so we're shipping the # entire dependency source cache with go-ethereum. 
diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go
index 8ff3f1f71a..4f1b6f8656 100644
--- a/cmd/devp2p/internal/ethtest/snap.go
+++ b/cmd/devp2p/internal/ethtest/snap.go
@@ -32,7 +32,6 @@ import (
 	"github.com/ethereum/go-ethereum/internal/utesting"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/ethereum/go-ethereum/trie/trienode"
-	"golang.org/x/crypto/sha3"
 )

 func (c *Conn) snapRequest(code uint64, msg any) (any, error) {
@@ -905,7 +904,7 @@ func (s *Suite) snapGetByteCodes(t *utesting.T, tc *byteCodesTest) error {
 	// that the serving node is missing
 	var (
 		bytecodes = res.Codes
-		hasher    = sha3.NewLegacyKeccak256().(crypto.KeccakState)
+		hasher    = crypto.NewKeccakState()
 		hash      = make([]byte, 32)
 		codes     = make([][]byte, len(req.Hashes))
 	)
@@ -964,7 +963,7 @@ func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error {

 	// Cross reference the requested trienodes with the response to find gaps
 	// that the serving node is missing
-	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
+	hasher := crypto.NewKeccakState()
 	hash := make([]byte, 32)

 	trienodes := res.Nodes
 	if got, want := len(trienodes), len(tc.expHashes); got != want {
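For context: crypto.NewKeccakState replaces the sha3.NewLegacyKeccak256 type assertion at these call sites. A minimal hedged sketch of the read-based hashing pattern the tests rely on (the `code` input is a placeholder):

	hasher := crypto.NewKeccakState() // keccak256 state that also supports Read
	hash := make([]byte, 32)
	hasher.Reset()
	hasher.Write(code) // feed in the raw bytecode blob being verified
	hasher.Read(hash)  // Read is cheaper than Sum: no internal state copy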
diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go
index 62c8593a1d..37a6db9ffc 100644
--- a/cmd/evm/internal/t8ntool/block.go
+++ b/cmd/evm/internal/t8ntool/block.go
@@ -160,7 +160,7 @@ func (i *bbInput) ToBlock() *types.Block {
 	if i.Header.Difficulty != nil {
 		header.Difficulty = i.Header.Difficulty
 	}
-	return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals)
+	return types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: i.Txs, Uncles: i.Ommers, Withdrawals: i.Withdrawals})
 }

 // SealBlock seals the given block using the configured engine.
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index ed7144ef80..a46f8a0667 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -296,7 +296,7 @@ func (g Alloc) OnAccount(addr *common.Address, dumpAccount state.DumpAccount) {
 	balance, _ := new(big.Int).SetString(dumpAccount.Balance, 0)
 	var storage map[common.Hash]common.Hash
 	if dumpAccount.Storage != nil {
-		storage = make(map[common.Hash]common.Hash)
+		storage = make(map[common.Hash]common.Hash, len(dumpAccount.Storage))
 		for k, v := range dumpAccount.Storage {
 			storage[k] = common.HexToHash(v)
 		}
diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go
index 0298ad06fd..091c37e457 100644
--- a/cmd/geth/consolecmd_test.go
+++ b/cmd/geth/consolecmd_test.go
@@ -132,7 +132,6 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
 	attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
 	attach.SetTemplateFunc("gover", runtime.Version)
 	attach.SetTemplateFunc("gethver", func() string { return params.VersionWithCommit("", "") })
-	attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase })
 	attach.SetTemplateFunc("niltime", func() string {
 		return time.Unix(1548854791, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
 	})
diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go
index 4e91a4ff25..742eadd5f3 100644
--- a/cmd/geth/dbcmd.go
+++ b/cmd/geth/dbcmd.go
@@ -246,11 +246,17 @@ func removeDB(ctx *cli.Context) error {
 		ancientDir = config.Node.ResolvePath(ancientDir)
 	}
 	// Delete state data
-	statePaths := []string{rootDir, filepath.Join(ancientDir, rawdb.StateFreezerName)}
+	statePaths := []string{
+		rootDir,
+		filepath.Join(ancientDir, rawdb.StateFreezerName),
+	}
 	confirmAndRemoveDB(statePaths, "state data", ctx, removeStateDataFlag.Name)

 	// Delete ancient chain
-	chainPaths := []string{filepath.Join(ancientDir, rawdb.ChainFreezerName)}
+	chainPaths := []string{filepath.Join(
+		ancientDir,
+		rawdb.ChainFreezerName,
+	)}
 	confirmAndRemoveDB(chainPaths, "ancient chain", ctx, removeChainDataFlag.Name)
 	return nil
 }
diff --git a/cmd/geth/logging_test.go b/cmd/geth/logging_test.go
index e56c679dae..07d8d98a46 100644
--- a/cmd/geth/logging_test.go
+++ b/cmd/geth/logging_test.go
@@ -75,6 +75,7 @@ func testConsoleLogging(t *testing.T, format string, tStart, tEnd int) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer readFile.Close()
 	wantLines := split(readFile)
 	haveLines := split(bytes.NewBuffer(haveB))
 	for i, want := range wantLines {
@@ -111,6 +112,7 @@ func TestJsonLogging(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer readFile.Close()
 	wantLines := split(readFile)
 	haveLines := split(bytes.NewBuffer(haveB))
 	for i, wantLine := range wantLines {
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 8c101d8dfb..d8171bc94a 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -1872,13 +1872,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 			Fatalf("Could not read genesis from database: %v", err)
 		}
 		if !genesis.Config.TerminalTotalDifficultyPassed {
-			Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficultyPassed must be true in developer mode")
+			Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficultyPassed must be true")
 		}
 		if genesis.Config.TerminalTotalDifficulty == nil {
-			Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be specified.")
+			Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be specified")
+		} else if genesis.Config.TerminalTotalDifficulty.Cmp(big.NewInt(0)) != 0 {
+			Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be 0")
 		}
-		if genesis.Difficulty.Cmp(genesis.Config.TerminalTotalDifficulty) != 1 {
-			Fatalf("Bad developer-mode genesis configuration: genesis block difficulty must be > terminalTotalDifficulty")
+		if genesis.Difficulty.Cmp(big.NewInt(0)) != 0 {
+			Fatalf("Bad developer-mode genesis configuration: difficulty must be 0")
 		}
 		chaindb.Close()
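A hedged sketch of a developer-mode genesis satisfying the tightened checks above (field values are illustrative; the ChainID is arbitrary):

	genesis := &core.Genesis{
		Config: &params.ChainConfig{
			ChainID:                       big.NewInt(1337),
			TerminalTotalDifficulty:       big.NewInt(0), // must now be exactly 0
			TerminalTotalDifficultyPassed: true,          // must be true
		},
		Difficulty: big.NewInt(0), // must be 0; the old "> TTD" rule is gone
	}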
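A note on the stdlib helpers adopted in the snapshot copy above: maps.Clone and slices.Clone (Go 1.21+) produce shallow copies, which matches the documented contract that individual votes are not deep-copied. A self-contained illustration:

	package main

	import (
		"fmt"
		"maps"
		"slices"
	)

	func main() {
		signers := map[string]struct{}{"a": {}, "b": {}}
		votes := []int{1, 2, 3}

		s2 := maps.Clone(signers) // copies top-level entries only
		v2 := slices.Clone(votes) // pointer elements would still alias

		delete(s2, "a")
		v2[0] = 99
		fmt.Println(len(signers), votes[0]) // 2 1, originals unchanged
	}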
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index f9ef1636b7..71df009988 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -521,7 +521,7 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
 	header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))

 	// Header seems complete, assemble into a block and return
-	return types.NewBlock(header, body.Transactions, body.Uncles, receipts, trie.NewStackTrie(nil)), nil
+	return types.NewBlock(header, &types.Body{Transactions: body.Transactions, Uncles: body.Uncles}, receipts, trie.NewStackTrie(nil)), nil
 }

 // SealHash returns the hash of a block prior to it being sealed.
diff --git a/core/block_validator_test.go b/core/block_validator_test.go
index 680ad4399d..9b43d682b9 100644
--- a/core/block_validator_test.go
+++ b/core/block_validator_test.go
@@ -154,12 +154,10 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
 		preHeaders := make([]*types.Header, len(preBlocks))
 		for i, block := range preBlocks {
 			preHeaders[i] = block.Header()
-			t.Logf("Pre-merge header: %d", block.NumberU64())
 		}
 		postHeaders := make([]*types.Header, len(postBlocks))
 		for i, block := range postBlocks {
 			postHeaders[i] = block.Header()
-			t.Logf("Post-merge header: %d", block.NumberU64())
 		}
 		// Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
 		chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, nil, gspec, nil, engine, vm.Config{}, nil, nil)
diff --git a/core/blockchain.go b/core/blockchain.go
index 652dd96e9e..f55adf0f58 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -69,7 +69,6 @@ var (
 	accountCommitTimer = metrics.NewRegisteredResettingTimer("chain/account/commits", nil)

 	storageReadTimer   = metrics.NewRegisteredResettingTimer("chain/storage/reads", nil)
-	storageHashTimer   = metrics.NewRegisteredResettingTimer("chain/storage/hashes", nil)
 	storageUpdateTimer = metrics.NewRegisteredResettingTimer("chain/storage/updates", nil)
 	storageCommitTimer = metrics.NewRegisteredResettingTimer("chain/storage/commits", nil)

@@ -102,10 +101,8 @@ const (
 	blockCacheLimit      = 256
 	receiptsCacheLimit   = 32
 	txLookupCacheLimit   = 1024
-	maxFutureBlocks      = 256
-	maxTimeFutureBlocks  = 30
 	DefaultTriesInMemory = 128
-	TriesInMemory        = 128
+

 	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
 	//
 	// Changelog:
@@ -159,8 +156,8 @@ type CacheConfig struct {
 }

 // arbitrum: exposing CacheConfig.triedbConfig to be used by Nitro when initializing arbos in database
-func (c *CacheConfig) TriedbConfig() *triedb.Config {
-	return c.triedbConfig()
+func (c *CacheConfig) TriedbConfig(isVerkle bool) *triedb.Config {
+	return c.triedbConfig(isVerkle)
 }

 // triedbConfig derives the configures for trie database.
@@ -864,6 +861,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
 		// Track the block number of the requested root hash
 		rootNumber uint64 // (no root == always 0)
 		rootFound  bool
+
 		// Retrieve the last pivot block to short circuit rollbacks beyond it
 		// and the current freezer limit to start nuking it's underflown.
 		pivot = rawdb.ReadLastPivotNumber(bc.db)
@@ -1375,7 +1373,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		// Delete block data from the main database.
 		var (
 			batch       = bc.db.NewBatch()
-			canonHashes = make(map[common.Hash]struct{})
+			canonHashes = make(map[common.Hash]struct{}, len(blockChain))
 		)
 		for _, block := range blockChain {
 			canonHashes[block.Hash()] = struct{}{}
@@ -1518,7 +1516,7 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {

 // writeBlockWithState writes block, metadata and corresponding state data to the
 // database.
-func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error {
+func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB) error {
 	// Calculate the total difficulty of the block
 	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
 	if ptd == nil {
@@ -1535,12 +1533,12 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 	rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
 	rawdb.WriteBlock(blockBatch, block)
 	rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
-	rawdb.WritePreimages(blockBatch, state.Preimages())
+	rawdb.WritePreimages(blockBatch, statedb.Preimages())
 	if err := blockBatch.Write(); err != nil {
 		log.Crit("Failed to write block into disk", "err", err)
 	}
 	// Commit all cached state changes into underlying memory database.
-	root, err := state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()))
+	root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()))
 	if err != nil {
 		return err
 	}
@@ -2037,8 +2035,7 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s
 	accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
 	storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
 	accountHashTimer.Update(statedb.AccountHashes)    // Account hashes are complete(in validation)
-	storageHashTimer.Update(statedb.StorageHashes)    // Storage hashes are complete(in validation)
-	triehash := statedb.AccountHashes + statedb.StorageHashes      // The time spent on tries hashing
+	triehash := statedb.AccountHashes                              // The time spent on tries hashing
 	trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates  // The time spent on tries update
 	trieRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read
 	trieRead += statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read
@@ -2065,7 +2062,7 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s
 	snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
 	triedbCommitTimer.Update(statedb.TrieDBCommits)     // Trie database commits are complete, we can mark them

-	blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits)
+	blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits)
 	blockInsertTimer.UpdateSince(start)

 	return &blockProcessingResult{usedGas: usedGas, procTime: proctime, status: status}, nil
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index c4b0e52e56..16fd44407d 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -785,7 +785,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
 		t.Fatalf("failed to insert receipt %d: %v", n, err)
 	}
 	// Freezer style fast import the chain.
-	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
+	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -875,12 +875,12 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
 			BaseFee: big.NewInt(params.InitialBaseFee),
 		}
 	)
-	height := uint64(1024)
+	height := uint64(64)
 	_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)

 	// makeDb creates a db instance for testing.
 	makeDb := func() ethdb.Database {
-		db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
+		db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
 		if err != nil {
 			t.Fatalf("failed to create temp freezer db: %v", err)
 		}
@@ -1712,7 +1712,7 @@ func TestTrieForkGC(t *testing.T) {
 		Config:  params.TestChainConfig,
 		BaseFee: big.NewInt(params.InitialBaseFee),
 	}
-	genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*DefaultTriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
+	genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })

 	// Generate a bunch of fork blocks, each side forking from the canonical chain
 	forks := make([]*types.Block, len(blocks))
@@ -1740,7 +1740,7 @@ func TestTrieForkGC(t *testing.T) {
 		}
 	}
 	// Dereference all the recent tries and ensure no past trie is left in
-	for i := 0; i < DefaultTriesInMemory; i++ {
+	for i := 0; i < state.TriesInMemory; i++ {
 		chain.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
 		chain.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
 	}
@@ -1764,11 +1764,11 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
 		BaseFee: big.NewInt(params.InitialBaseFee),
 	}
 	genDb, shared, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
-	original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*DefaultTriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
-	competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*DefaultTriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
+	original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
+	competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })

 	// Import the shared chain and the original canonical one
-	db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
+	db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
 	defer db.Close()

 	chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), nil, genesis, nil, engine, vm.Config{}, nil, nil)
@@ -1804,7 +1804,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
 	}
 	// In path-based trie database implementation, it will keep 128 diff + 1 disk
 	// layers, totally 129 latest states available. In hash-based it's 128.
-	states := DefaultTriesInMemory
+	states := state.TriesInMemory
 	if scheme == rawdb.PathScheme {
 		states = states + 1
 	}
@@ -1833,7 +1833,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) {
 		funds   = big.NewInt(1000000000)
 		gspec   = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{address: {Balance: funds}}}
 	)
-	height := uint64(1024)
+	height := uint64(64)
 	_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)

 	// Import the chain as a ancient-first node and ensure all pointers are updated
@@ -1908,7 +1908,7 @@ func testInsertReceiptChainRollback(t *testing.T, scheme string) {
 	}
 	// Set up a BlockChain that uses the ancient store.
-	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
+	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -1972,13 +1972,13 @@ func testLowDiffLongChain(t *testing.T, scheme string) {
 	}
 	// We must use a pretty long chain to ensure that the fork doesn't overtake us
 	// until after at least 128 blocks post tip
-	genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*DefaultTriesInMemory, func(i int, b *BlockGen) {
+	genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*state.TriesInMemory, func(i int, b *BlockGen) {
 		b.SetCoinbase(common.Address{1})
 		b.OffsetTime(-9)
 	})

 	// Import the canonical chain
-	diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
+	diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
 	defer diskdb.Close()

 	chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), nil, genesis, nil, engine, vm.Config{}, nil, nil)
@@ -1992,7 +1992,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) {
 	}
 	// Generate fork chain, starting from an early block
 	parent := blocks[10]
-	fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*DefaultTriesInMemory, func(i int, b *BlockGen) {
+	fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*state.TriesInMemory, func(i int, b *BlockGen) {
 		b.SetCoinbase(common.Address{2})
 	})

@@ -2055,7 +2055,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
 		// Set the terminal total difficulty in the config
 		gspec.Config.TerminalTotalDifficulty = big.NewInt(0)
 	}
-	genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*DefaultTriesInMemory, func(i int, gen *BlockGen) {
+	genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*state.TriesInMemory, func(i int, gen *BlockGen) {
 		tx, err := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("deadbeef"), big.NewInt(100), 21000, big.NewInt(int64(i+1)*params.GWei), nil), signer, key)
 		if err != nil {
 			t.Fatalf("failed to create tx: %v", err)
@@ -2070,9 +2070,9 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 	}

-	lastPrunedIndex := len(blocks) - DefaultTriesInMemory - 1
+	lastPrunedIndex := len(blocks) - state.TriesInMemory - 1
 	lastPrunedBlock := blocks[lastPrunedIndex]
-	firstNonPrunedBlock := blocks[len(blocks)-DefaultTriesInMemory]
+	firstNonPrunedBlock := blocks[len(blocks)-state.TriesInMemory]

 	// Verify pruning of lastPrunedBlock
 	if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
@@ -2099,7 +2099,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
 	// Generate fork chain, make it longer than canon
 	parentIndex := lastPrunedIndex + blocksBetweenCommonAncestorAndPruneblock
 	parent := blocks[parentIndex]
-	fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*DefaultTriesInMemory, func(i int, b *BlockGen) {
+	fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*state.TriesInMemory, func(i int, b *BlockGen) {
 		b.SetCoinbase(common.Address{2})
 		if int(b.header.Number.Uint64()) >= mergeBlock {
 			b.SetPoS()
@@ -2190,7 +2190,7 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) {
 		b.OffsetTime(-9) // A higher difficulty
 	})
 	// Import the shared chain and the original canonical one
-	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
+	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -2361,7 +2361,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i
 		}
 	})
 	// Import the shared chain and the original canonical one
-	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
+	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -2742,7 +2742,7 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
 		BaseFee: big.NewInt(params.InitialBaseFee),
 	}
 	// Generate and import the canonical chain
-	_, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*DefaultTriesInMemory, nil)
+	_, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*state.TriesInMemory, nil)

 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), nil, genesis, nil, engine, vm.Config{}, nil, nil)
 	if err != nil {
@@ -2755,9 +2755,9 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
 	}
 	// In path-based trie database implementation, it will keep 128 diff + 1 disk
 	// layers, totally 129 latest states available. In hash-based it's 128.
-	states := DefaultTriesInMemory
+	states := state.TriesInMemory
 	if scheme == rawdb.PathScheme {
-		states = DefaultTriesInMemory + 1
+		states = state.TriesInMemory + 1
 	}
 	lastPrunedIndex := len(blocks) - states - 1
 	lastPrunedBlock := blocks[lastPrunedIndex]
@@ -3634,18 +3634,19 @@ func testSetCanonical(t *testing.T, scheme string) {
 			Alloc:   types.GenesisAlloc{address: {Balance: funds}},
 			BaseFee: big.NewInt(params.InitialBaseFee),
 		}
-		signer = types.LatestSigner(gspec.Config)
-		engine = ethash.NewFaker()
+		signer      = types.LatestSigner(gspec.Config)
+		engine      = ethash.NewFaker()
+		chainLength = 10
 	)
 	// Generate and import the canonical chain
-	_, canon, _ := GenerateChainWithGenesis(gspec, engine, 2*DefaultTriesInMemory, func(i int, gen *BlockGen) {
+	_, canon, _ := GenerateChainWithGenesis(gspec, engine, chainLength, func(i int, gen *BlockGen) {
 		tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key)
 		if err != nil {
 			panic(err)
 		}
 		gen.AddTx(tx)
 	})
-	diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
+	diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
 	defer diskdb.Close()

 	chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), nil, gspec, nil, engine, vm.Config{}, nil, nil)
@@ -3659,7 +3660,7 @@ func testSetCanonical(t *testing.T, scheme string) {
 	}

 	// Generate the side chain and import them
-	_, side, _ := GenerateChainWithGenesis(gspec, engine, 2*DefaultTriesInMemory, func(i int, gen *BlockGen) {
+	_, side, _ := GenerateChainWithGenesis(gspec, engine, chainLength, func(i int, gen *BlockGen) {
 		tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1), params.TxGas, gen.header.BaseFee, nil), signer, key)
 		if err != nil {
 			panic(err)
@@ -3698,8 +3699,8 @@ func testSetCanonical(t *testing.T, scheme string) {
 	verify(side[len(side)-1])

 	// Reset the chain head to original chain
-	chain.SetCanonical(canon[DefaultTriesInMemory-1])
-	verify(canon[DefaultTriesInMemory-1])
+	chain.SetCanonical(canon[chainLength-1])
+	verify(canon[chainLength-1])
 }

 // TestCanonicalHashMarker tests all the canonical hash markers are updated/deleted
diff --git a/core/genesis.go b/core/genesis.go
index 739c8368de..4e59de9c80 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -477,7 +477,7 @@ func (g *Genesis) ToBlock() *types.Block {
 			}
 		}
 	}
-	return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)).WithWithdrawals(withdrawals)
+	return types.NewBlock(head, &types.Body{Withdrawals: withdrawals}, nil, trie.NewStackTrie(nil))
 }

 // Commit writes the block and state of a genesis specification to the database.
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 0e4c3ae4b0..4fbabf00fd 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -322,7 +322,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
 		t.Fatalf("expected trie to be verkle")
 	}

-	if !rawdb.ExistsAccountTrieNode(db, nil) {
+	if !rawdb.HasAccountTrieNode(db, nil) {
 		t.Fatal("could not find node")
 	}
 }
diff --git a/core/mkalloc.go b/core/mkalloc.go
index 201c2fe7de..cc4955f038 100644
--- a/core/mkalloc.go
+++ b/core/mkalloc.go
@@ -101,6 +101,7 @@ func main() {
 	if err != nil {
 		panic(err)
 	}
+	defer file.Close()

 	if err := json.NewDecoder(file).Decode(g); err != nil {
 		panic(err)
 	}
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 128b33579d..bd1e9c05d6 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -786,7 +786,7 @@ func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
 	if body == nil {
 		return nil
 	}
-	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles).WithWithdrawals(body.Withdrawals)
+	return types.NewBlockWithHeader(header).WithBody(*body)
 }

 // WriteBlock serializes a block into the database, header and body separately.
@@ -876,7 +876,11 @@ func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block {
 	}
 	for _, bad := range badBlocks {
 		if bad.Header.Hash() == hash {
-			return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles).WithWithdrawals(bad.Body.Withdrawals)
+			block := types.NewBlockWithHeader(bad.Header)
+			if bad.Body != nil {
+				block = block.WithBody(*bad.Body)
+			}
+			return block
 		}
 	}
 	return nil
@@ -895,7 +899,11 @@ func ReadAllBadBlocks(db ethdb.Reader) []*types.Block {
 	}
 	var blocks []*types.Block
 	for _, bad := range badBlocks {
-		blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles).WithWithdrawals(bad.Body.Withdrawals))
+		block := types.NewBlockWithHeader(bad.Header)
+		if bad.Body != nil {
+			block = block.WithBody(*bad.Body)
+		}
+		blocks = append(blocks, block)
 	}
 	return blocks
 }
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index a7ceb72998..fdc940b57e 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -640,7 +640,7 @@ func makeTestBlocks(nblock int, txsPerBlock int) []*types.Block {
 			Number: big.NewInt(int64(i)),
 			Extra:  []byte("test block"),
 		}
-		blocks[i] = types.NewBlockWithHeader(header).WithBody(txs, nil)
+		blocks[i] = types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs})
 		blocks[i].Hash() // pre-cache the block hash
 	}
 	return blocks
diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go
index 124389ba7a..78dba000fc 100644
--- a/core/rawdb/accessors_indexes_test.go
+++ b/core/rawdb/accessors_indexes_test.go
@@ -76,7 +76,7 @@ func TestLookupStorage(t *testing.T) {
 			tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
 			txs := []*types.Transaction{tx1, tx2, tx3}

-			block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newTestHasher())
+			block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, &types.Body{Transactions: txs}, nil, newTestHasher())

 			// Check that no transactions entries are in a pristine database
 			for i, tx := range txs {
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
index e34b24fd76..44eb715d04 100644
--- a/core/rawdb/accessors_trie.go
+++ b/core/rawdb/accessors_trie.go
@@ -24,7 +24,6 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
-	"golang.org/x/crypto/sha3"
 )

 // HashScheme is the legacy hash-based state scheme with which trie nodes are
@@ -50,7 +49,7 @@ const PathScheme = "path"
 type hasher struct{ sha crypto.KeccakState }

 var hasherPool = sync.Pool{
-	New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+	New: func() interface{} { return &hasher{sha: crypto.NewKeccakState()} },
 }

 func newHasher() *hasher {
@@ -65,33 +64,15 @@ func (h *hasher) release() {
 	hasherPool.Put(h)
 }

-// ReadAccountTrieNode retrieves the account trie node and the associated node
-// hash with the specified node path.
-func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) {
-	data, err := db.Get(accountTrieNodeKey(path))
-	if err != nil {
-		return nil, common.Hash{}
-	}
-	h := newHasher()
-	defer h.release()
-	return data, h.hash(data)
-}
-
-// HasAccountTrieNode checks the account trie node presence with the specified
-// node path and the associated node hash.
-func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) bool {
-	data, err := db.Get(accountTrieNodeKey(path))
-	if err != nil {
-		return false
-	}
-	h := newHasher()
-	defer h.release()
-	return h.hash(data) == hash
+// ReadAccountTrieNode retrieves the account trie node with the specified node path.
+func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) []byte {
+	data, _ := db.Get(accountTrieNodeKey(path))
+	return data
 }

-// ExistsAccountTrieNode checks the presence of the account trie node with the
+// HasAccountTrieNode checks the presence of the account trie node with the
 // specified node path, regardless of the node hash.
-func ExistsAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool {
+func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool {
 	has, err := db.Has(accountTrieNodeKey(path))
 	if err != nil {
 		return false
@@ -113,33 +94,15 @@ func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) {
 	}
 }

-// ReadStorageTrieNode retrieves the storage trie node and the associated node
-// hash with the specified node path.
-func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) ([]byte, common.Hash) {
-	data, err := db.Get(storageTrieNodeKey(accountHash, path))
-	if err != nil {
-		return nil, common.Hash{}
-	}
-	h := newHasher()
-	defer h.release()
-	return data, h.hash(data)
-}
-
-// HasStorageTrieNode checks the storage trie node presence with the provided
-// node path and the associated node hash.
-func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte, hash common.Hash) bool {
-	data, err := db.Get(storageTrieNodeKey(accountHash, path))
-	if err != nil {
-		return false
-	}
-	h := newHasher()
-	defer h.release()
-	return h.hash(data) == hash
+// ReadStorageTrieNode retrieves the storage trie node with the specified node path.
+func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) []byte {
+	data, _ := db.Get(storageTrieNodeKey(accountHash, path))
+	return data
 }

-// ExistsStorageTrieNode checks the presence of the storage trie node with the
+// HasStorageTrieNode checks the presence of the storage trie node with the
 // specified account hash and node path, regardless of the node hash.
-func ExistsStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool {
+func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool {
 	has, err := db.Has(storageTrieNodeKey(accountHash, path))
 	if err != nil {
 		return false
@@ -198,10 +161,18 @@ func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash c
 	case HashScheme:
 		return HasLegacyTrieNode(db, hash)
 	case PathScheme:
+		var blob []byte
 		if owner == (common.Hash{}) {
-			return HasAccountTrieNode(db, path, hash)
+			blob = ReadAccountTrieNode(db, path)
+		} else {
+			blob = ReadStorageTrieNode(db, owner, path)
 		}
-		return HasStorageTrieNode(db, owner, path, hash)
+		if len(blob) == 0 {
+			return false
+		}
+		h := newHasher()
+		defer h.release()
+		return h.hash(blob) == hash // exists but not match
 	default:
 		panic(fmt.Sprintf("Unknown scheme %v", scheme))
 	}
@@ -209,43 +180,35 @@ func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash c

 // ReadTrieNode retrieves the trie node from database with the provided node info
 // and associated node hash.
-// hashScheme-based lookup requires the following:
-//   - hash
-//
-// pathScheme-based lookup requires the following:
-//   - owner
-//   - path
 func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) []byte {
 	switch scheme {
 	case HashScheme:
 		return ReadLegacyTrieNode(db, hash)
 	case PathScheme:
-		var (
-			blob  []byte
-			nHash common.Hash
-		)
+		var blob []byte
 		if owner == (common.Hash{}) {
-			blob, nHash = ReadAccountTrieNode(db, path)
+			blob = ReadAccountTrieNode(db, path)
 		} else {
-			blob, nHash = ReadStorageTrieNode(db, owner, path)
+			blob = ReadStorageTrieNode(db, owner, path)
 		}
-		if nHash != hash {
+		if len(blob) == 0 {
 			return nil
 		}
+		h := newHasher()
+		defer h.release()
+		if h.hash(blob) != hash {
+			return nil // exists but not match
+		}
 		return blob
 	default:
 		panic(fmt.Sprintf("Unknown scheme %v", scheme))
 	}
 }

-// WriteTrieNode writes the trie node into database with the provided node info
-// and associated node hash.
-// hashScheme-based lookup requires the following:
-//   - hash
+// WriteTrieNode writes the trie node into database with the provided node info.
 //
-// pathScheme-based lookup requires the following:
-//   - owner
-//   - path
+// hash-scheme requires the node hash as the identifier.
+// path-scheme requires the node owner and path as the identifier.
 func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte, scheme string) {
 	switch scheme {
 	case HashScheme:
@@ -261,14 +224,10 @@ func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash
 	}
 }

-// DeleteTrieNode deletes the trie node from database with the provided node info
-// and associated node hash.
-// hashScheme-based lookup requires the following:
-//   - hash
+// DeleteTrieNode deletes the trie node from database with the provided node info.
 //
-// pathScheme-based lookup requires the following:
-//   - owner
-//   - path
+// hash-scheme requires the node hash as the identifier.
+// path-scheme requires the node owner and path as the identifier.
 func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, scheme string) {
 	switch scheme {
 	case HashScheme:
@@ -287,9 +246,8 @@ func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, has
 // ReadStateScheme reads the state scheme of persistent state, or none
 // if the state is not present in database.
 func ReadStateScheme(db ethdb.Reader) string {
-	// Check if state in path-based scheme is present
-	blob, _ := ReadAccountTrieNode(db, nil)
-	if len(blob) != 0 {
+	// Check if state in path-based scheme is present.
+	if HasAccountTrieNode(db, nil) {
 		return PathScheme
 	}
 	// The root node might be deleted during the initial snap sync, check
@@ -304,8 +262,7 @@ func ReadStateScheme(db ethdb.Reader) string {
 	if header == nil {
 		return "" // empty datadir
 	}
-	blob = ReadLegacyTrieNode(db, header.Root)
-	if len(blob) == 0 {
+	if !HasLegacyTrieNode(db, header.Root) {
 		return "" // no state in disk
 	}
 	return HashScheme
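A hedged usage sketch of the reshaped trie-node accessors (only functions visible in this patch are used; `db` and `root` are placeholders, and the rawdb package prefix applies when calling from outside the package):

	// Path scheme: presence checks are now hash-free, and reads return
	// just the node blob.
	if rawdb.HasAccountTrieNode(db, nil) { // nil path == trie root
		blob := rawdb.ReadAccountTrieNode(db, nil)
		_ = blob // raw node RLP; hash it yourself if verification is needed
	}
	// The scheme-aware wrapper still accepts a hash and re-derives it on read.
	node := rawdb.ReadTrieNode(db, common.Hash{}, nil, root, rawdb.PathScheme)
	_ = node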
diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go
index e88867af0e..44867ded04 100644
--- a/core/rawdb/ancient_scheme.go
+++ b/core/rawdb/ancient_scheme.go
@@ -16,7 +16,11 @@

 package rawdb

-import "path/filepath"
+import (
+	"path/filepath"
+
+	"github.com/ethereum/go-ethereum/ethdb"
+)

 // The list of table names of chain freezer.
 const (
@@ -75,7 +79,15 @@ var (
 // freezers the collections of all builtin freezers.
 var freezers = []string{ChainFreezerName, StateFreezerName}

-// NewStateFreezer initializes the freezer for state history.
-func NewStateFreezer(ancientDir string, readOnly bool) (*ResettableFreezer, error) {
-	return NewResettableFreezer(filepath.Join(ancientDir, StateFreezerName), "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerNoSnappy)
+// NewStateFreezer initializes the ancient store for state history.
+//
+//   - if the empty directory is given, initializes the pure in-memory
+//     state freezer (e.g. dev mode).
+//   - if non-empty directory is given, initializes the regular file-based
+//     state freezer.
+func NewStateFreezer(ancientDir string, readOnly bool) (ethdb.ResettableAncientStore, error) {
+	if ancientDir == "" {
+		return NewMemoryFreezer(readOnly, stateFreezerNoSnappy), nil
+	}
+	return newResettableFreezer(filepath.Join(ancientDir, StateFreezerName), "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerNoSnappy)
 }
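A hedged sketch of the new empty-directory convention (error handling reduced to a stdlib log call; the flag values are illustrative):

	// An empty ancient directory now selects a memory-backed, resettable
	// state freezer instead of a file-based one, which is what dev mode
	// and the "" freezer paths in the tests above rely on.
	frdb, err := rawdb.NewStateFreezer("", false)
	if err != nil {
		log.Fatal(err)
	}
	defer frdb.Close()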
var freezers = []string{ChainFreezerName, StateFreezerName} -// NewStateFreezer initializes the freezer for state history. -func NewStateFreezer(ancientDir string, readOnly bool) (*ResettableFreezer, error) { - return NewResettableFreezer(filepath.Join(ancientDir, StateFreezerName), "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerNoSnappy) +// NewStateFreezer initializes the ancient store for state history. +// +// - if the empty directory is given, initializes the pure in-memory +// state freezer (e.g. dev mode). +// - if non-empty directory is given, initializes the regular file-based +// state freezer. +func NewStateFreezer(ancientDir string, readOnly bool) (ethdb.ResettableAncientStore, error) { + if ancientDir == "" { + return NewMemoryFreezer(readOnly, stateFreezerNoSnappy), nil + } + return newResettableFreezer(filepath.Join(ancientDir, StateFreezerName), "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerNoSnappy) } diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go index 428cda544b..1c69639c9d 100644 --- a/core/rawdb/ancient_utils.go +++ b/core/rawdb/ancient_utils.go @@ -89,20 +89,17 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) { infos = append(infos, info) case StateFreezerName: - if ReadStateScheme(db) != PathScheme { - continue - } datadir, err := db.AncientDatadir() if err != nil { return nil, err } f, err := NewStateFreezer(datadir, true) if err != nil { - return nil, err + continue // the state freezer might not exist yet, skip it } defer f.Close() - info, err := inspect(StateFreezerName, stateFreezerNoSnappy, f) + info, err := inspect(freezer, stateFreezerNoSnappy, f) if err != nil { return nil, err } diff --git a/core/rawdb/ancienttest/testsuite.go b/core/rawdb/ancienttest/testsuite.go new file mode 100644 index 0000000000..70de263c04 --- /dev/null +++ b/core/rawdb/ancienttest/testsuite.go @@ -0,0 +1,325 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package ancienttest + +import ( + "bytes" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/internal/testrand" +) + +// TestAncientSuite runs a suite of tests against an ancient database +// implementation.
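With the new NewStateFreezer behavior above, the backing store is picked purely from the directory argument. A brief usage sketch, assuming the two-argument signature shown in this diff:

```go
package main

import (
	"log"
	"os"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// Empty directory: ephemeral in-memory state freezer, e.g. dev mode.
	mem, err := rawdb.NewStateFreezer("", false)
	if err != nil {
		log.Fatal(err)
	}
	defer mem.Close()

	// Non-empty directory: regular file-based state freezer rooted under it.
	dir, err := os.MkdirTemp("", "ancient")
	if err != nil {
		log.Fatal(err)
	}
	disk, err := rawdb.NewStateFreezer(dir, false)
	if err != nil {
		log.Fatal(err)
	}
	defer disk.Close()
}
```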
+func TestAncientSuite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + // Test basic read methods + t.Run("BasicRead", func(t *testing.T) { basicRead(t, newFn) }) + + // Test batch read method + t.Run("BatchRead", func(t *testing.T) { batchRead(t, newFn) }) + + // Test basic write methods + t.Run("BasicWrite", func(t *testing.T) { basicWrite(t, newFn) }) + + // Test if data mutation is allowed after db write + t.Run("nonMutable", func(t *testing.T) { nonMutable(t, newFn) }) +} + +func basicRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + var ( + db = newFn([]string{"a"}) + data = makeDataset(100, 32) + ) + defer db.Close() + + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < len(data); i++ { + op.AppendRaw("a", uint64(i), data[i]) + } + return nil + }) + db.TruncateTail(10) + db.TruncateHead(90) + + // Test basic tail and head retrievals + tail, err := db.Tail() + if err != nil || tail != 10 { + t.Fatal("Failed to retrieve tail") + } + ancient, err := db.Ancients() + if err != nil || ancient != 90 { + t.Fatal("Failed to retrieve ancient") + } + + // Test the deleted items shouldn't be reachable + var cases = []struct { + start int + limit int + }{ + {0, 10}, + {90, 100}, + } + for _, c := range cases { + for i := c.start; i < c.limit; i++ { + exist, err := db.HasAncient("a", uint64(i)) + if err != nil { + t.Fatalf("Failed to check presence, %v", err) + } + if exist { + t.Fatalf("Item %d is already truncated", uint64(i)) + } + _, err = db.Ancient("a", uint64(i)) + if err == nil { + t.Fatal("Error is expected for non-existent item") + } + } + } + + // Test the items in range should be reachable + for i := 10; i < 90; i++ { + exist, err := db.HasAncient("a", uint64(i)) + if err != nil { + t.Fatalf("Failed to check presence, %v", err) + } + if !exist { + t.Fatalf("Item %d is missing", uint64(i)) + } + blob, err := db.Ancient("a", uint64(i)) + if err != nil { + t.Fatalf("Failed to retrieve item, %v", err) + } + if !bytes.Equal(blob, data[i]) { + t.Fatalf("Unexpected item content, want: %v, got: %v", data[i], blob) + } + } + + // Test the items in unknown table shouldn't be reachable + exist, err := db.HasAncient("b", uint64(0)) + if err != nil { + t.Fatalf("Failed to check presence, %v", err) + } + if exist { + t.Fatal("Item in unknown table shouldn't be found") + } + _, err = db.Ancient("b", uint64(0)) + if err == nil { + t.Fatal("Error is expected for unknown table") + } +} + +func batchRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + var ( + db = newFn([]string{"a"}) + data = makeDataset(100, 32) + ) + defer db.Close() + + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), data[i]) + } + return nil + }) + db.TruncateTail(10) + db.TruncateHead(90) + + // Test the items in range should be reachable + var cases = []struct { + start uint64 + count uint64 + maxSize uint64 + expStart int + expLimit int + }{ + // Items in range [10, 90) with no size limitation + { + 10, 80, 0, 10, 90, + }, + // Items in range [10, 90) with 32 size cap, single item is expected + { + 10, 80, 32, 10, 11, + }, + // Items in range [10, 90) with 31 size cap, single item is expected + { + 10, 80, 31, 10, 11, + }, + // Items in range [10, 90) with 32*80 size cap, all items are expected + { + 10, 80, 32 * 80, 10, 90, + }, + // Extra items above the last item are not returned + { + 10, 90, 0, 10, 90, + }, + } + for i, c := range cases { + batch, err := db.AncientRange("a", 
c.start, c.count, c.maxSize) + if err != nil { + t.Fatalf("Failed to retrieve item in range, %v", err) + } + if !reflect.DeepEqual(batch, data[c.expStart:c.expLimit]) { + t.Fatalf("Case %d, Batch content is not matched", i) + } + } + + // Test out-of-range / zero-size retrieval should be rejected + _, err := db.AncientRange("a", 0, 1, 0) + if err == nil { + t.Fatal("Out-of-range retrieval should be rejected") + } + _, err = db.AncientRange("a", 90, 1, 0) + if err == nil { + t.Fatal("Out-of-range retrieval should be rejected") + } + _, err = db.AncientRange("a", 10, 0, 0) + if err == nil { + t.Fatal("Zero-size retrieval should be rejected") + } + + // Test item in unknown table shouldn't be reachable + _, err = db.AncientRange("b", 10, 1, 0) + if err == nil { + t.Fatal("Item in unknown table shouldn't be found") + } +} + +func basicWrite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + var ( + db = newFn([]string{"a", "b"}) + dataA = makeDataset(100, 32) + dataB = makeDataset(100, 32) + ) + defer db.Close() + + // The ancient write to tables should be aligned + _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), dataA[i]) + } + return nil + }) + if err == nil { + t.Fatal("Unaligned ancient write should be rejected") + } + + // Test normal ancient write + size, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), dataA[i]) + op.AppendRaw("b", uint64(i), dataB[i]) + } + return nil + }) + if err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } + wantSize := int64(6400) + if size != wantSize { + t.Fatalf("Ancient write size is not expected, want: %d, got: %d", wantSize, size) + } + + // Write should work after head truncating + db.TruncateHead(90) + _, err = db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 90; i < 100; i++ { + op.AppendRaw("a", uint64(i), dataA[i]) + op.AppendRaw("b", uint64(i), dataB[i]) + } + return nil + }) + if err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } + + // Write should work after truncating everything + db.TruncateTail(0) + _, err = db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), dataA[i]) + op.AppendRaw("b", uint64(i), dataB[i]) + } + return nil + }) + if err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } +} + +func nonMutable(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + db := newFn([]string{"a"}) + defer db.Close() + + // We write 100 zero-bytes to the freezer and immediately mutate the slice + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + data := make([]byte, 100) + op.AppendRaw("a", uint64(0), data) + for i := range data { + data[i] = 0xff + } + return nil + }) + // Now read it. + data, err := db.Ancient("a", uint64(0)) + if err != nil { + t.Fatal(err) + } + for k, v := range data { + if v != 0 { + t.Fatalf("byte %d != 0: %x", k, v) + } + } +} + +// TestResettableAncientSuite runs a suite of tests against a resettable ancient +// database implementation. 
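The size-cap cases above follow from the AncientRange contract: at least one item is always returned, even when that item alone exceeds maxBytes. A runnable sketch against the in-memory freezer introduced in this diff:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	f := rawdb.NewMemoryFreezer(false, map[string]bool{"a": true})
	defer f.Close()

	f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i := uint64(0); i < 100; i++ {
			op.AppendRaw("a", i, make([]byte, 32)) // 100 items, 32 bytes each
		}
		return nil
	})

	// No size cap: all requested items come back.
	all, _ := f.AncientRange("a", 10, 80, 0)
	fmt.Println(len(all)) // 80

	// A cap below a single item size still yields one item: at least one
	// element is always returned, even if it alone exceeds maxBytes.
	one, _ := f.AncientRange("a", 10, 80, 31)
	fmt.Println(len(one)) // 1
}
```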
+func TestResettableAncientSuite(t *testing.T, newFn func(kinds []string) ethdb.ResettableAncientStore) { + t.Run("Reset", func(t *testing.T) { + var ( + db = newFn([]string{"a"}) + data = makeDataset(100, 32) + ) + defer db.Close() + + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), data[i]) + } + return nil + }) + db.TruncateTail(10) + db.TruncateHead(90) + + // Ancient write should work after resetting + db.Reset() + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), data[i]) + } + return nil + }) + }) +} + +func makeDataset(size, value int) [][]byte { + var vals [][]byte + for i := 0; i < size; i += 1 { + vals = append(vals, testrand.Bytes(value)) + } + return vals +} diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go index d8214874bd..7a0b819b6f 100644 --- a/core/rawdb/chain_freezer.go +++ b/core/rawdb/chain_freezer.go @@ -39,26 +39,40 @@ const ( freezerBatchLimit = 30000 ) -// chainFreezer is a wrapper of freezer with additional chain freezing feature. -// The background thread will keep moving ancient chain segments from key-value -// database to flat files for saving space on live database. +// chainFreezer is a wrapper of chain ancient store with additional chain freezing +// feature. The background thread will keep moving ancient chain segments from +// key-value database to flat files for saving space on live database. type chainFreezer struct { - *Freezer + ethdb.AncientStore // Ancient store for storing cold chain segment + quit chan struct{} wg sync.WaitGroup trigger chan chan struct{} // Manual blocking freeze trigger, test determinism } -// newChainFreezer initializes the freezer for ancient chain data. +// newChainFreezer initializes the freezer for ancient chain segment. +// +// - if the empty directory is given, initializes the pure in-memory +// state freezer (e.g. dev mode). +// - if non-empty directory is given, initializes the regular file-based +// state freezer. func newChainFreezer(datadir string, namespace string, readonly bool) (*chainFreezer, error) { - freezer, err := NewChainFreezer(datadir, namespace, readonly) + var ( + err error + freezer ethdb.AncientStore + ) + if datadir == "" { + freezer = NewMemoryFreezer(readonly, chainFreezerNoSnappy) + } else { + freezer, err = NewFreezer(datadir, namespace, readonly, freezerTableSize, chainFreezerNoSnappy) + } if err != nil { return nil, err } return &chainFreezer{ - Freezer: freezer, - quit: make(chan struct{}), - trigger: make(chan chan struct{}), + AncientStore: freezer, + quit: make(chan struct{}), + trigger: make(chan chan struct{}), }, nil } @@ -70,7 +84,7 @@ func (f *chainFreezer) Close() error { close(f.quit) } f.wg.Wait() - return f.Freezer.Close() + return f.AncientStore.Close() } // readHeadNumber returns the number of chain head block. 0 is returned if the @@ -167,7 +181,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { log.Debug("Current full block not old enough to freeze", "err", err) continue } - frozen := f.frozen.Load() + frozen, _ := f.Ancients() // no error will occur, safe to ignore // Short circuit if the blocks below threshold are already frozen. 
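The Reset test above relies on the store rewinding both counters to zero so that writes can restart at item 0. A compact sketch of that behavior using the in-memory freezer:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	f := rawdb.NewMemoryFreezer(false, map[string]bool{"a": true})
	defer f.Close()

	f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i := uint64(0); i < 100; i++ {
			op.AppendRaw("a", i, []byte{byte(i)})
		}
		return nil
	})

	// Reset drops all data and rewinds both counters, so a fresh write
	// starting at item 0 is accepted again afterwards.
	f.Reset()
	head, _ := f.Ancients()
	tail, _ := f.Tail()
	fmt.Println(head, tail) // 0 0
}
```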
if frozen != 0 && frozen-1 >= threshold { @@ -190,7 +204,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { backoff = true continue } - // Batch of blocks have been frozen, flush them before wiping from leveldb + // Batch of blocks have been frozen, flush them before wiping from key-value store if err := f.Sync(); err != nil { log.Crit("Failed to flush frozen tables", "err", err) } @@ -210,7 +224,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { // Wipe out side chains also and track dangling side chains var dangling []common.Hash - frozen = f.frozen.Load() // Needs reload after during freezeRange + frozen, _ = f.Ancients() // Needs reload after during freezeRange for number := first; number < frozen; number++ { // Always keep the genesis block in active database if number != 0 { diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go index 78b0a82e10..390424f673 100644 --- a/core/rawdb/chain_iterator_test.go +++ b/core/rawdb/chain_iterator_test.go @@ -34,7 +34,7 @@ func TestChainIterator(t *testing.T) { var block *types.Block var txs []*types.Transaction to := common.BytesToAddress([]byte{0x11}) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) // Empty genesis block + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, newTestHasher()) // Empty genesis block WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) for i := uint64(1); i <= 10; i++ { @@ -60,7 +60,7 @@ func TestChainIterator(t *testing.T) { }) } txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, &types.Body{Transactions: types.Transactions{tx}}, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } @@ -111,7 +111,7 @@ func TestIndexTransactions(t *testing.T) { to := common.BytesToAddress([]byte{0x11}) // Write empty genesis block - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) @@ -138,7 +138,7 @@ func TestIndexTransactions(t *testing.T) { }) } txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, &types.Body{Transactions: types.Transactions{tx}}, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 1d11c5c283..04a809cf65 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -34,11 +34,13 @@ import ( "github.com/olekukonko/tablewriter" ) -// freezerdb is a database wrapper that enables freezer data retrievals. +// freezerdb is a database wrapper that enables ancient chain segment freezing. type freezerdb struct { - ancientRoot string ethdb.KeyValueStore - ethdb.AncientStore + *chainFreezer + + readOnly bool + ancientRoot string } // AncientDatadir returns the path of root ancient directory. @@ -55,7 +57,7 @@ func (frdb *freezerdb) AncientDatadir() (string, error) { // the slow ancient tables. 
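The test updates above reflect the new types.NewBlock signature, which takes a single *types.Body instead of separate transaction and uncle slices. A minimal sketch, using trie.NewStackTrie(nil) as the hasher the way geth tests commonly do:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	header := &types.Header{Number: big.NewInt(1)}
	tx := types.NewTx(&types.LegacyTx{Nonce: 0})

	// The body travels as one value; absent fields (uncles, withdrawals)
	// are simply left zero in types.Body.
	body := &types.Body{Transactions: types.Transactions{tx}}
	block := types.NewBlock(header, body, nil, trie.NewStackTrie(nil))
	fmt.Println(block.Transactions().Len()) // 1
}
```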
func (frdb *freezerdb) Close() error { var errs []error - if err := frdb.AncientStore.Close(); err != nil { + if err := frdb.chainFreezer.Close(); err != nil { errs = append(errs, err) } if err := frdb.KeyValueStore.Close(); err != nil { @@ -71,12 +73,12 @@ func (frdb *freezerdb) Close() error { // a freeze cycle completes, without having to sleep for a minute to trigger the // automatic background run. func (frdb *freezerdb) Freeze() error { - if frdb.AncientStore.(*chainFreezer).readonly { + if frdb.readOnly { return errReadOnly } // Trigger a freeze cycle and block until it's done trigger := make(chan struct{}, 1) - frdb.AncientStore.(*chainFreezer).trigger <- trigger + frdb.chainFreezer.trigger <- trigger <-trigger return nil } @@ -225,8 +227,14 @@ func resolveChainFreezerDir(ancient string) string { // storage. The passed ancient indicates the path of root ancient directory // where the chain freezer can be opened. func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly bool) (ethdb.Database, error) { - // Create the idle freezer instance - frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly) + // Create the idle freezer instance. If the given ancient directory is empty, + // in-memory chain freezer is used (e.g. dev mode); otherwise the regular + // file-based freezer is created. + chainFreezerDir := ancient + if chainFreezerDir != "" { + chainFreezerDir = resolveChainFreezerDir(chainFreezerDir) + } + frdb, err := newChainFreezer(chainFreezerDir, namespace, readonly) if err != nil { printChainMetadata(db) return nil, err @@ -310,7 +318,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st } } // Freezer is consistent with the key-value database, permit combining the two - if !frdb.readonly { + if !readonly { frdb.wg.Add(1) go func() { frdb.freeze(db) @@ -320,7 +328,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st return &freezerdb{ ancientRoot: ancient, KeyValueStore: db, - AncientStore: frdb, + chainFreezer: frdb, }, nil } diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index dbee468e8a..4d78460920 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -61,7 +61,7 @@ const freezerTableSize = 2 * 1000 * 1000 * 1000 // reserving it for go-ethereum. This would also reduce the memory requirements // of Geth, and thus also GC overhead. type Freezer struct { - frozen atomic.Uint64 // Number of blocks already frozen + frozen atomic.Uint64 // Number of items already frozen tail atomic.Uint64 // Number of the first stored item in the freezer // This lock synchronizes writers and the truncate operation, as well as @@ -75,12 +75,6 @@ type Freezer struct { closeOnce sync.Once } -// NewChainFreezer is a small utility method around NewFreezer that sets the -// default parameters for the chain storage. -func NewChainFreezer(datadir string, namespace string, readonly bool) (*Freezer, error) { - return NewFreezer(datadir, namespace, readonly, freezerTableSize, chainFreezerNoSnappy) -} - // NewFreezer creates a freezer instance for maintaining immutable ordered // data according to the given parameters. // diff --git a/core/rawdb/freezer_memory.go b/core/rawdb/freezer_memory.go new file mode 100644 index 0000000000..954b58e874 --- /dev/null +++ b/core/rawdb/freezer_memory.go @@ -0,0 +1,428 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. 
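With the wiring above, an entirely ephemeral database can be assembled by combining a memory key-value store with an empty ancient directory, which now selects the in-memory chain freezer. A short sketch:

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func main() {
	// An empty ancient directory selects the in-memory chain freezer,
	// pairing naturally with an ephemeral key-value store (dev mode).
	db, err := rawdb.NewDatabaseWithFreezer(memorydb.New(), "", "", false)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```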
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package rawdb + +import ( + "errors" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" +) + +// memoryTable is used to store a list of sequential items in memory. +type memoryTable struct { + name string // Table name + items uint64 // Number of stored items in the table, including the deleted ones + offset uint64 // Number of deleted items from the table + data [][]byte // List of rlp-encoded items, sorted in order + size uint64 // Total memory size occupied by the table + lock sync.RWMutex +} + +// newMemoryTable initializes the memory table. +func newMemoryTable(name string) *memoryTable { + return &memoryTable{name: name} +} + +// has returns an indicator whether the specified data exists. +func (t *memoryTable) has(number uint64) bool { + t.lock.RLock() + defer t.lock.RUnlock() + + return number >= t.offset && number < t.items +} + +// retrieve retrieves multiple items in sequence, starting from the index 'start'. +// It will return: +// - at most 'count' items, +// - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize), +// but will otherwise return as many items as fit into maxByteSize. +// - if maxBytes is not specified, 'count' items will be returned if they are present +func (t *memoryTable) retrieve(start uint64, count, maxBytes uint64) ([][]byte, error) { + t.lock.RLock() + defer t.lock.RUnlock() + + var ( + size uint64 + batch [][]byte + ) + // Ensure the start is written, not deleted from the tail, and that the + // caller actually wants something. + if t.items <= start || t.offset > start || count == 0 { + return nil, errOutOfBounds + } + // Cap the item count if the retrieval is out of bounds. + if start+count > t.items { + count = t.items - start + } + for n := start; n < start+count; n++ { + index := n - t.offset + if len(batch) != 0 && maxBytes != 0 && size+uint64(len(t.data[index])) > maxBytes { + return batch, nil + } + batch = append(batch, t.data[index]) + size += uint64(len(t.data[index])) + } + return batch, nil +} + +// truncateHead discards any recent data above the provided threshold number. +func (t *memoryTable) truncateHead(items uint64) error { + t.lock.Lock() + defer t.lock.Unlock() + + // Short circuit if nothing to delete. + if t.items <= items { + return nil + } + if items < t.offset { + return errors.New("truncation below tail") + } + t.data = t.data[:items-t.offset] + t.items = items + return nil +} + +// truncateTail discards any old data below the provided threshold number. +func (t *memoryTable) truncateTail(items uint64) error { + t.lock.Lock() + defer t.lock.Unlock() + + // Short circuit if nothing to delete.
+ if t.offset >= items { + return nil + } + if t.items < items { + return errors.New("truncation above head") + } + t.data = t.data[items-t.offset:] + t.offset = items + return nil +} + +// commit merges the given item batch into table. It's presumed that the +// batch is ordered and continuous with table. +func (t *memoryTable) commit(batch [][]byte) error { + t.lock.Lock() + defer t.lock.Unlock() + + for _, item := range batch { + t.size += uint64(len(item)) + } + t.data = append(t.data, batch...) + t.items += uint64(len(batch)) + return nil +} + +// memoryBatch is the singleton batch used for ancient write. +type memoryBatch struct { + data map[string][][]byte + next map[string]uint64 + size map[string]int64 +} + +func newMemoryBatch() *memoryBatch { + return &memoryBatch{ + data: make(map[string][][]byte), + next: make(map[string]uint64), + size: make(map[string]int64), + } +} + +func (b *memoryBatch) reset(freezer *MemoryFreezer) { + b.data = make(map[string][][]byte) + b.next = make(map[string]uint64) + b.size = make(map[string]int64) + + for name, table := range freezer.tables { + b.next[name] = table.items + } +} + +// Append adds an RLP-encoded item. +func (b *memoryBatch) Append(kind string, number uint64, item interface{}) error { + if b.next[kind] != number { + return errOutOrderInsertion + } + blob, err := rlp.EncodeToBytes(item) + if err != nil { + return err + } + b.data[kind] = append(b.data[kind], blob) + b.next[kind]++ + b.size[kind] += int64(len(blob)) + return nil +} + +// AppendRaw adds an item without RLP-encoding it. +func (b *memoryBatch) AppendRaw(kind string, number uint64, blob []byte) error { + if b.next[kind] != number { + return errOutOrderInsertion + } + b.data[kind] = append(b.data[kind], common.CopyBytes(blob)) + b.next[kind]++ + b.size[kind] += int64(len(blob)) + return nil +} + +// commit is called at the end of a write operation and writes all remaining +// data to tables. +func (b *memoryBatch) commit(freezer *MemoryFreezer) (items uint64, writeSize int64, err error) { + // Check that count agrees on all batches. + items = math.MaxUint64 + for name, next := range b.next { + if items < math.MaxUint64 && next != items { + return 0, 0, fmt.Errorf("table %s is at item %d, want %d", name, next, items) + } + items = next + } + // Commit all table batches. + for name, batch := range b.data { + table := freezer.tables[name] + if err := table.commit(batch); err != nil { + return 0, 0, err + } + writeSize += b.size[name] + } + return items, writeSize, nil +} + +// MemoryFreezer is an ephemeral ancient store. It implements the ethdb.AncientStore +// interface and can be used along with ephemeral key-value store. +type MemoryFreezer struct { + items uint64 // Number of items stored + tail uint64 // Number of the first stored item in the freezer + readonly bool // Flag if the freezer is only for reading + lock sync.RWMutex // Lock to protect fields + tables map[string]*memoryTable // Tables for storing everything + writeBatch *memoryBatch // Pre-allocated write batch +} + +// NewMemoryFreezer initializes an in-memory freezer instance. +func NewMemoryFreezer(readonly bool, tableName map[string]bool) *MemoryFreezer { + tables := make(map[string]*memoryTable) + for name := range tableName { + tables[name] = newMemoryTable(name) + } + return &MemoryFreezer{ + writeBatch: newMemoryBatch(), + readonly: readonly, + tables: tables, + } +} + +// HasAncient returns an indicator whether the specified data exists. 
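memoryBatch.commit only succeeds when every table's batch ends at the same item number. A sketch of this aligned-write requirement, matching the commit logic above:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	f := rawdb.NewMemoryFreezer(false, map[string]bool{"a": true, "b": true})
	defer f.Close()

	// Every table must advance in lockstep: commit verifies that all batches
	// agree on the next item number.
	_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i := uint64(0); i < 3; i++ {
			op.AppendRaw("a", i, []byte{1})
			op.AppendRaw("b", i, []byte{2})
		}
		return nil
	})
	fmt.Println(err) // <nil>

	// Appending to only one table leaves the counts skewed and is rejected.
	_, err = f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		return op.AppendRaw("a", 3, []byte{1})
	})
	fmt.Println(err != nil) // true
}
```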
+func (f *MemoryFreezer) HasAncient(kind string, number uint64) (bool, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + if table := f.tables[kind]; table != nil { + return table.has(number), nil + } + return false, nil +} + +// Ancient retrieves an ancient binary blob from the in-memory freezer. +func (f *MemoryFreezer) Ancient(kind string, number uint64) ([]byte, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + t := f.tables[kind] + if t == nil { + return nil, errUnknownTable + } + data, err := t.retrieve(number, 1, 0) + if err != nil { + return nil, err + } + return data[0], nil +} + +// AncientRange retrieves multiple items in sequence, starting from the index 'start'. +// It will return +// - at most 'count' items, +// - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize), +// but will otherwise return as many items as fit into maxByteSize. +// - if maxBytes is not specified, 'count' items will be returned if they are present +func (f *MemoryFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + t := f.tables[kind] + if t == nil { + return nil, errUnknownTable + } + return t.retrieve(start, count, maxBytes) +} + +// Ancients returns the ancient item numbers in the freezer. +func (f *MemoryFreezer) Ancients() (uint64, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + return f.items, nil +} + +// Tail returns the number of first stored item in the freezer. +// This number can also be interpreted as the total deleted item numbers. +func (f *MemoryFreezer) Tail() (uint64, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + return f.tail, nil +} + +// AncientSize returns the ancient size of the specified category. +func (f *MemoryFreezer) AncientSize(kind string) (uint64, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + if table := f.tables[kind]; table != nil { + return table.size, nil + } + return 0, errUnknownTable +} + +// ReadAncients runs the given read operation while ensuring that no writes take place +// on the underlying freezer. +func (f *MemoryFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { + f.lock.RLock() + defer f.lock.RUnlock() + + return fn(f) +} + +// ModifyAncients runs the given write operation. +func (f *MemoryFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.readonly { + return 0, errReadOnly + } + // Roll back all tables to the starting position in case of error. + defer func(old uint64) { + if err == nil { + return + } + // The write operation has failed. Go back to the previous item position. + for name, table := range f.tables { + err := table.truncateHead(old) + if err != nil { + log.Error("Freezer table roll-back failed", "table", name, "index", old, "err", err) + } + } + }(f.items) + + // Modify the ancients in batch. + f.writeBatch.reset(f) + if err := fn(f.writeBatch); err != nil { + return 0, err + } + item, writeSize, err := f.writeBatch.commit(f) + if err != nil { + return 0, err + } + f.items = item + return writeSize, nil +} + +// TruncateHead discards any recent data above the provided threshold number. +// It returns the previous head number. 
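Both truncation methods, implemented below, report the previous boundary rather than the new one. A usage sketch against the in-memory freezer:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	f := rawdb.NewMemoryFreezer(false, map[string]bool{"a": true})
	defer f.Close()

	f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i := uint64(0); i < 100; i++ {
			op.AppendRaw("a", i, []byte{byte(i)})
		}
		return nil
	})

	// Both calls report the previous boundary, letting callers see how much
	// was actually dropped.
	oldHead, _ := f.TruncateHead(90)
	oldTail, _ := f.TruncateTail(10)
	fmt.Println(oldHead, oldTail) // 100 0
}
```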
+func (f *MemoryFreezer) TruncateHead(items uint64) (uint64, error) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.readonly { + return 0, errReadOnly + } + old := f.items + if old <= items { + return old, nil + } + for _, table := range f.tables { + if err := table.truncateHead(items); err != nil { + return 0, err + } + } + f.items = items + return old, nil +} + +// TruncateTail discards any old data below the provided threshold number. +func (f *MemoryFreezer) TruncateTail(tail uint64) (uint64, error) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.readonly { + return 0, errReadOnly + } + old := f.tail + if old >= tail { + return old, nil + } + for _, table := range f.tables { + if err := table.truncateTail(tail); err != nil { + return 0, err + } + } + f.tail = tail + return old, nil +} + +// Sync flushes all data tables to disk. +func (f *MemoryFreezer) Sync() error { + return nil +} + +// MigrateTable processes and migrates entries of a given table to a new format. +// The second argument is a function that takes a raw entry and returns it +// in the newest format. +func (f *MemoryFreezer) MigrateTable(string, func([]byte) ([]byte, error)) error { + return errors.New("not implemented") +} + +// Close releases all the resources held by the memory freezer. It will panic if +// any following invocation is made to a closed freezer. +func (f *MemoryFreezer) Close() error { + f.lock.Lock() + defer f.lock.Unlock() + + f.tables = nil + f.writeBatch = nil + return nil +} + +// Reset drops all the data cached in the memory freezer and resets itself +// back to the default state. +func (f *MemoryFreezer) Reset() error { + f.lock.Lock() + defer f.lock.Unlock() + + tables := make(map[string]*memoryTable) + for name := range f.tables { + tables[name] = newMemoryTable(name) + } + f.tables = tables + f.items, f.tail = 0, 0 + return nil +} diff --git a/core/rawdb/freezer_memory_test.go b/core/rawdb/freezer_memory_test.go new file mode 100644 index 0000000000..e71de0f629 --- /dev/null +++ b/core/rawdb/freezer_memory_test.go @@ -0,0 +1,41 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+ +package rawdb + +import ( + "testing" + + "github.com/ethereum/go-ethereum/core/rawdb/ancienttest" + "github.com/ethereum/go-ethereum/ethdb" +) + +func TestMemoryFreezer(t *testing.T) { + ancienttest.TestAncientSuite(t, func(kinds []string) ethdb.AncientStore { + tables := make(map[string]bool) + for _, kind := range kinds { + tables[kind] = true + } + return NewMemoryFreezer(false, tables) + }) + ancienttest.TestResettableAncientSuite(t, func(kinds []string) ethdb.ResettableAncientStore { + tables := make(map[string]bool) + for _, kind := range kinds { + tables[kind] = true + } + return NewMemoryFreezer(false, tables) + }) +} diff --git a/core/rawdb/freezer_resettable.go b/core/rawdb/freezer_resettable.go index 7a85489738..7fa59b8d21 100644 --- a/core/rawdb/freezer_resettable.go +++ b/core/rawdb/freezer_resettable.go @@ -30,16 +30,16 @@ const tmpSuffix = ".tmp" // freezerOpenFunc is the function used to open/create a freezer. type freezerOpenFunc = func() (*Freezer, error) -// ResettableFreezer is a wrapper of the freezer which makes the +// resettableFreezer is a wrapper of the freezer which makes the // freezer resettable. -type ResettableFreezer struct { +type resettableFreezer struct { freezer *Freezer opener freezerOpenFunc datadir string lock sync.RWMutex } -// NewResettableFreezer creates a resettable freezer, note freezer is +// newResettableFreezer creates a resettable freezer, note freezer is // only resettable if the passed file directory is exclusively occupied // by the freezer. And also the user-configurable ancient root directory // is **not** supported for reset since it might be a mount and rename @@ -48,7 +48,7 @@ type ResettableFreezer struct { // // The reset function will delete directory atomically and re-create the // freezer from scratch. -func NewResettableFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*ResettableFreezer, error) { +func newResettableFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*resettableFreezer, error) { if err := cleanup(datadir); err != nil { return nil, err } @@ -59,7 +59,7 @@ func NewResettableFreezer(datadir string, namespace string, readonly bool, maxTa if err != nil { return nil, err } - return &ResettableFreezer{ + return &resettableFreezer{ freezer: freezer, opener: opener, datadir: datadir, @@ -70,7 +70,7 @@ func NewResettableFreezer(datadir string, namespace string, readonly bool, maxTa // recreate the freezer from scratch. The atomicity of directory deletion // is guaranteed by the rename operation, the leftover directory will be // cleaned up in next startup in case crash happens after rename. -func (f *ResettableFreezer) Reset() error { +func (f *resettableFreezer) Reset() error { f.lock.Lock() defer f.lock.Unlock() @@ -93,7 +93,7 @@ func (f *ResettableFreezer) Reset() error { } // Close terminates the chain freezer, unmapping all the data files. 
-func (f *ResettableFreezer) Close() error { +func (f *resettableFreezer) Close() error { f.lock.RLock() defer f.lock.RUnlock() @@ -102,7 +102,7 @@ func (f *ResettableFreezer) Close() error { // HasAncient returns an indicator whether the specified ancient data exists // in the freezer -func (f *ResettableFreezer) HasAncient(kind string, number uint64) (bool, error) { +func (f *resettableFreezer) HasAncient(kind string, number uint64) (bool, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -110,7 +110,7 @@ func (f *ResettableFreezer) HasAncient(kind string, number uint64) (bool, error) } // Ancient retrieves an ancient binary blob from the append-only immutable files. -func (f *ResettableFreezer) Ancient(kind string, number uint64) ([]byte, error) { +func (f *resettableFreezer) Ancient(kind string, number uint64) ([]byte, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -123,7 +123,7 @@ func (f *ResettableFreezer) Ancient(kind string, number uint64) ([]byte, error) // - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize), // but will otherwise return as many items as fit into maxByteSize. // - if maxBytes is not specified, 'count' items will be returned if they are present. -func (f *ResettableFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { +func (f *resettableFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -131,7 +131,7 @@ func (f *ResettableFreezer) AncientRange(kind string, start, count, maxBytes uin } // Ancients returns the length of the frozen items. -func (f *ResettableFreezer) Ancients() (uint64, error) { +func (f *resettableFreezer) Ancients() (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -139,7 +139,7 @@ func (f *ResettableFreezer) Ancients() (uint64, error) { } // Tail returns the number of first stored item in the freezer. -func (f *ResettableFreezer) Tail() (uint64, error) { +func (f *resettableFreezer) Tail() (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -147,7 +147,7 @@ func (f *ResettableFreezer) Tail() (uint64, error) { } // AncientSize returns the ancient size of the specified category. -func (f *ResettableFreezer) AncientSize(kind string) (uint64, error) { +func (f *resettableFreezer) AncientSize(kind string) (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -156,7 +156,7 @@ func (f *ResettableFreezer) AncientSize(kind string) (uint64, error) { // ReadAncients runs the given read operation while ensuring that no writes take place // on the underlying freezer. -func (f *ResettableFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { +func (f *resettableFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { f.lock.RLock() defer f.lock.RUnlock() @@ -164,7 +164,7 @@ func (f *ResettableFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) ( } // ModifyAncients runs the given write operation. -func (f *ResettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) { +func (f *resettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) { f.lock.RLock() defer f.lock.RUnlock() @@ -173,7 +173,7 @@ func (f *ResettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) // TruncateHead discards any recent data above the provided threshold number. // It returns the previous head number. 
-func (f *ResettableFreezer) TruncateHead(items uint64) (uint64, error) { +func (f *resettableFreezer) TruncateHead(items uint64) (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -182,7 +182,7 @@ func (f *ResettableFreezer) TruncateHead(items uint64) (uint64, error) { // TruncateTail discards any recent data below the provided threshold number. // It returns the previous value -func (f *ResettableFreezer) TruncateTail(tail uint64) (uint64, error) { +func (f *resettableFreezer) TruncateTail(tail uint64) (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -190,7 +190,7 @@ func (f *ResettableFreezer) TruncateTail(tail uint64) (uint64, error) { } // Sync flushes all data tables to disk. -func (f *ResettableFreezer) Sync() error { +func (f *resettableFreezer) Sync() error { f.lock.RLock() defer f.lock.RUnlock() @@ -199,7 +199,7 @@ func (f *ResettableFreezer) Sync() error { // MigrateTable processes the entries in a given table in sequence // converting them to a new format if they're of an old format. -func (f *ResettableFreezer) MigrateTable(kind string, convert convertLegacyFn) error { +func (f *resettableFreezer) MigrateTable(kind string, convert convertLegacyFn) error { f.lock.RLock() defer f.lock.RUnlock() diff --git a/core/rawdb/freezer_resettable_test.go b/core/rawdb/freezer_resettable_test.go index d741bc14e5..61dc23d798 100644 --- a/core/rawdb/freezer_resettable_test.go +++ b/core/rawdb/freezer_resettable_test.go @@ -33,7 +33,7 @@ func TestResetFreezer(t *testing.T) { {1, bytes.Repeat([]byte{1}, 2048)}, {2, bytes.Repeat([]byte{2}, 2048)}, } - f, _ := NewResettableFreezer(t.TempDir(), "", false, 2048, freezerTestTableDef) + f, _ := newResettableFreezer(t.TempDir(), "", false, 2048, freezerTestTableDef) defer f.Close() f.ModifyAncients(func(op ethdb.AncientWriteOp) error { @@ -87,7 +87,7 @@ func TestFreezerCleanup(t *testing.T) { {2, bytes.Repeat([]byte{2}, 2048)}, } datadir := t.TempDir() - f, _ := NewResettableFreezer(datadir, "", false, 2048, freezerTestTableDef) + f, _ := newResettableFreezer(datadir, "", false, 2048, freezerTestTableDef) f.ModifyAncients(func(op ethdb.AncientWriteOp) error { for _, item := range items { op.AppendRaw("test", item.id, item.blob) @@ -98,7 +98,7 @@ func TestFreezerCleanup(t *testing.T) { os.Rename(datadir, tmpName(datadir)) // Open the freezer again, trigger cleanup operation - f, _ = NewResettableFreezer(datadir, "", false, 2048, freezerTestTableDef) + f, _ = newResettableFreezer(datadir, "", false, 2048, freezerTestTableDef) f.Close() if _, err := os.Lstat(tmpName(datadir)); !os.IsNotExist(err) { diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go index 93bc2c2254..72d1417200 100644 --- a/core/rawdb/freezer_test.go +++ b/core/rawdb/freezer_test.go @@ -27,6 +27,7 @@ import ( "sync" "testing" + "github.com/ethereum/go-ethereum/core/rawdb/ancienttest" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/require" @@ -480,3 +481,22 @@ func TestFreezerCloseSync(t *testing.T) { t.Fatalf("want %v, have %v", have, want) } } + +func TestFreezerSuite(t *testing.T) { + ancienttest.TestAncientSuite(t, func(kinds []string) ethdb.AncientStore { + tables := make(map[string]bool) + for _, kind := range kinds { + tables[kind] = true + } + f, _ := newFreezerForTesting(t, tables) + return f + }) + ancienttest.TestResettableAncientSuite(t, func(kinds []string) ethdb.ResettableAncientStore { + tables := make(map[string]bool) + for _, kind := range kinds { + tables[kind] = true + } + f, 
_ := newResettableFreezer(t.TempDir(), "", false, 2048, tables) + return f + }) +} diff --git a/core/state/access_list.go b/core/state/access_list.go index 718bf17cf7..b0effbeadc 100644 --- a/core/state/access_list.go +++ b/core/state/access_list.go @@ -17,7 +17,10 @@ package state import ( + "fmt" "maps" + "slices" + "strings" "github.com/ethereum/go-ethereum/common" ) @@ -130,3 +133,35 @@ func (al *accessList) DeleteSlot(address common.Address, slot common.Hash) { func (al *accessList) DeleteAddress(address common.Address) { delete(al.addresses, address) } + +// Equal returns true if the two access lists are identical +func (al *accessList) Equal(other *accessList) bool { + if !maps.Equal(al.addresses, other.addresses) { + return false + } + return slices.EqualFunc(al.slots, other.slots, + func(m map[common.Hash]struct{}, m2 map[common.Hash]struct{}) bool { + return maps.Equal(m, m2) + }) +} + +// PrettyPrint prints the contents of the access list in a human-readable form +func (al *accessList) PrettyPrint() string { + out := new(strings.Builder) + var sortedAddrs []common.Address + for addr := range al.addresses { + sortedAddrs = append(sortedAddrs, addr) + } + slices.SortFunc(sortedAddrs, common.Address.Cmp) + for _, addr := range sortedAddrs { + idx := al.addresses[addr] + fmt.Fprintf(out, "%#x : (idx %d)\n", addr, idx) + if idx >= 0 { + slotmap := al.slots[idx] + for h := range slotmap { + fmt.Fprintf(out, " %#x\n", h) + } + } + } + return out.String() +} diff --git a/core/state/journal.go b/core/state/journal.go index 5b7cb40c7e..c0f5615c98 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -17,6 +17,8 @@ package state import ( + "maps" + "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" ) @@ -29,6 +31,9 @@ type journalEntry interface { // dirtied returns the Ethereum address modified by this journal entry. dirtied() *common.Address + + // copy returns a deep-copied journal entry. + copy() journalEntry } // journal contains the list of state modifications applied since the last state @@ -83,21 +88,31 @@ func (j *journal) length() int { return len(j.entries) } +// copy returns a deep-copied journal. +func (j *journal) copy() *journal { + entries := make([]journalEntry, 0, j.length()) + for i := 0; i < j.length(); i++ { + entries = append(entries, j.entries[i].copy()) + } + return &journal{ + entries: entries, + dirties: maps.Clone(j.dirties), + } +} + type ( // Changes to the account trie. createObjectChange struct { account *common.Address } - resetObjectChange struct { - prev *stateObject - prevdestruct bool - prevAccount []byte - prevStorage map[common.Hash][]byte - prevAccountOriginExist bool - prevAccountOrigin []byte - prevStorageOrigin map[common.Hash][]byte + // createContractChange represents an account becoming a contract-account. + // This event happens prior to executing initcode. The journal-event simply + // manages the created-flag, in order to allow same-tx destruction. 
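accessList.Equal above composes maps.Equal with slices.EqualFunc to compare a slice of sets element-wise. A self-contained sketch of the same pattern on plain string sets:

```go
package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	a := []map[string]struct{}{{"x": {}}, {"y": {}}}
	b := []map[string]struct{}{{"x": {}}, {"y": {}}}

	// Element-wise comparison of a slice of sets, the same shape as the
	// accessList.Equal implementation above.
	equal := slices.EqualFunc(a, b, func(m, n map[string]struct{}) bool {
		return maps.Equal(m, n)
	})
	fmt.Println(equal) // true
}
```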
+ createContractChange struct { + account common.Address } + selfDestructChange struct { account *common.Address prev bool // whether account had already self-destructed @@ -114,8 +129,9 @@ type ( prev uint64 } storageChange struct { - account *common.Address - key, prevalue common.Hash + account *common.Address + key common.Hash + prevvalue *common.Hash } codeChange struct { account *common.Address @@ -135,6 +151,7 @@ type ( touchChange struct { account *common.Address } + // Changes to the access list accessListAddAccountChange struct { address *common.Address @@ -144,6 +161,7 @@ type ( slot *common.Hash } + // Changes to transient storage transientStorageChange struct { account *common.Address key, prevalue common.Hash @@ -152,42 +170,32 @@ type ( func (ch createObjectChange) revert(s *StateDB) { delete(s.stateObjects, *ch.account) - delete(s.stateObjectsDirty, *ch.account) } func (ch createObjectChange) dirtied() *common.Address { return ch.account } -func (ch resetObjectChange) revert(s *StateDB) { - s.setStateObject(ch.prev) - if !ch.prevdestruct { - delete(s.stateObjectsDestruct, ch.prev.address) - } - if ch.prevAccount != nil { - s.accounts[ch.prev.addrHash] = ch.prevAccount - } - if ch.prevStorage != nil { - s.storages[ch.prev.addrHash] = ch.prevStorage - } - if ch.prevAccountOriginExist { - s.accountsOrigin[ch.prev.address] = ch.prevAccountOrigin - } - if ch.prevStorageOrigin != nil { - s.storagesOrigin[ch.prev.address] = ch.prevStorageOrigin +func (ch createObjectChange) copy() journalEntry { + return createObjectChange{ + account: ch.account, } } -func (ch resetObjectChange) dirtied() *common.Address { - // Arbitrum: We keep the behavior that existed before go-ethereum v1.12.1 and return nil, - // instead of returning the reset address as upstream go-ethereum v1.12.1 does. - // That's because, unlike for go-ethereum, whether this account is dirty or not is relevant for Arbitrum. - // Arbitrum hooks manipulate the state in some ways that go-ethereum doesn't which cause that relevance, - // e.g. subtracting balance from an account that hasn't been otherwise touched. 
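Every journal entry now knows how to deep-copy itself, and journal.copy clones the entries plus the dirties map. A freestanding sketch of the pattern; the types here are simplified stand-ins, not the geth ones:

```go
package main

import (
	"fmt"
	"maps"
)

type journalEntry interface {
	copy() journalEntry
}

type refundChange struct{ prev uint64 }

func (ch refundChange) copy() journalEntry { return refundChange{prev: ch.prev} }

type journal struct {
	entries []journalEntry
	dirties map[string]int
}

// copy deep-copies the journal: every entry is cloned through its copy()
// method and the dirty-counter map is cloned wholesale.
func (j *journal) copy() *journal {
	entries := make([]journalEntry, 0, len(j.entries))
	for _, entry := range j.entries {
		entries = append(entries, entry.copy())
	}
	return &journal{entries: entries, dirties: maps.Clone(j.dirties)}
}

func main() {
	j := &journal{entries: []journalEntry{refundChange{prev: 7}}, dirties: map[string]int{"addr": 1}}
	c := j.copy()
	c.dirties["addr"] = 99
	fmt.Println(j.dirties["addr"]) // 1: the original is unaffected
}
```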
- // See https://github.com/OffchainLabs/nitro/pull/1976 for details +func (ch createContractChange) revert(s *StateDB) { + s.getStateObject(ch.account).newContract = false +} + +func (ch createContractChange) dirtied() *common.Address { return nil } +func (ch createContractChange) copy() journalEntry { + return createContractChange{ + account: ch.account, + } +} + func (ch selfDestructChange) revert(s *StateDB) { obj := s.getStateObject(*ch.account) if obj != nil { @@ -200,6 +208,14 @@ func (ch selfDestructChange) dirtied() *common.Address { return ch.account } +func (ch selfDestructChange) copy() journalEntry { + return selfDestructChange{ + account: ch.account, + prev: ch.prev, + prevbalance: new(uint256.Int).Set(ch.prevbalance), + } +} + var ripemd = common.HexToAddress("0000000000000000000000000000000000000003") func (ch touchChange) revert(s *StateDB) { @@ -209,6 +225,12 @@ func (ch touchChange) dirtied() *common.Address { return ch.account } +func (ch touchChange) copy() journalEntry { + return touchChange{ + account: ch.account, + } +} + func (ch balanceChange) revert(s *StateDB) { s.getStateObject(*ch.account).setBalance(ch.prev) } @@ -217,6 +239,13 @@ func (ch balanceChange) dirtied() *common.Address { return ch.account } +func (ch balanceChange) copy() journalEntry { + return balanceChange{ + account: ch.account, + prev: new(uint256.Int).Set(ch.prev), + } +} + func (ch nonceChange) revert(s *StateDB) { s.getStateObject(*ch.account).setNonce(ch.prev) } @@ -225,6 +254,13 @@ func (ch nonceChange) dirtied() *common.Address { return ch.account } +func (ch nonceChange) copy() journalEntry { + return nonceChange{ + account: ch.account, + prev: ch.prev, + } +} + func (ch codeChange) revert(s *StateDB) { s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode) } @@ -233,14 +269,30 @@ func (ch codeChange) dirtied() *common.Address { return ch.account } +func (ch codeChange) copy() journalEntry { + return codeChange{ + account: ch.account, + prevhash: common.CopyBytes(ch.prevhash), + prevcode: common.CopyBytes(ch.prevcode), + } +} + func (ch storageChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setState(ch.key, ch.prevalue) + s.getStateObject(*ch.account).setState(ch.key, ch.prevvalue) } func (ch storageChange) dirtied() *common.Address { return ch.account } +func (ch storageChange) copy() journalEntry { + return storageChange{ + account: ch.account, + key: ch.key, + prevvalue: ch.prevvalue, + } +} + func (ch transientStorageChange) revert(s *StateDB) { s.setTransientState(*ch.account, ch.key, ch.prevalue) } @@ -249,6 +301,14 @@ func (ch transientStorageChange) dirtied() *common.Address { return nil } +func (ch transientStorageChange) copy() journalEntry { + return transientStorageChange{ + account: ch.account, + key: ch.key, + prevalue: ch.prevalue, + } +} + func (ch refundChange) revert(s *StateDB) { s.refund = ch.prev } @@ -257,6 +317,12 @@ func (ch refundChange) dirtied() *common.Address { return nil } +func (ch refundChange) copy() journalEntry { + return refundChange{ + prev: ch.prev, + } +} + func (ch addLogChange) revert(s *StateDB) { logs := s.logs[ch.txhash] if len(logs) == 1 { @@ -271,6 +337,12 @@ func (ch addLogChange) dirtied() *common.Address { return nil } +func (ch addLogChange) copy() journalEntry { + return addLogChange{ + txhash: ch.txhash, + } +} + func (ch addPreimageChange) revert(s *StateDB) { delete(s.preimages, ch.hash) } @@ -279,6 +351,12 @@ func (ch addPreimageChange) dirtied() *common.Address { return nil } +func (ch 
addPreimageChange) copy() journalEntry { + return addPreimageChange{ + hash: ch.hash, + } +} + func (ch accessListAddAccountChange) revert(s *StateDB) { /* One important invariant here, is that whenever a (addr, slot) is added, if the @@ -296,6 +374,12 @@ func (ch accessListAddAccountChange) dirtied() *common.Address { return nil } +func (ch accessListAddAccountChange) copy() journalEntry { + return accessListAddAccountChange{ + address: ch.address, + } +} + func (ch accessListAddSlotChange) revert(s *StateDB) { s.accessList.DeleteSlot(*ch.address, *ch.slot) } @@ -303,3 +387,10 @@ func (ch accessListAddSlotChange) revert(s *StateDB) { func (ch accessListAddSlotChange) dirtied() *common.Address { return nil } + +func (ch accessListAddSlotChange) copy() journalEntry { + return accessListAddSlotChange{ + address: ch.address, + slot: ch.slot, + } +} diff --git a/core/state/journal_arbitrum.go b/core/state/journal_arbitrum.go index 69b2415b79..b286303150 100644 --- a/core/state/journal_arbitrum.go +++ b/core/state/journal_arbitrum.go @@ -16,6 +16,12 @@ func (ch wasmActivation) dirtied() *common.Address { return nil } +func (ch wasmActivation) copy() journalEntry { + return wasmActivation{ + moduleHash: ch.moduleHash, + } +} + // Updates the Rust-side recent program cache var CacheWasmRust func(asm []byte, moduleHash common.Hash, version uint16, tag uint32, debug bool) = func([]byte, common.Hash, uint16, uint32, bool) {} var EvictWasmRust func(moduleHash common.Hash, version uint16, tag uint32, debug bool) = func(common.Hash, uint16, uint32, bool) {} @@ -35,6 +41,15 @@ func (ch CacheWasm) dirtied() *common.Address { return nil } +func (ch CacheWasm) copy() journalEntry { + return CacheWasm{ + ModuleHash: ch.ModuleHash, + Version: ch.Version, + Tag: ch.Tag, + Debug: ch.Debug, + } +} + type EvictWasm struct { ModuleHash common.Hash Version uint16 @@ -53,3 +68,12 @@ func (ch EvictWasm) revert(s *StateDB) { func (ch EvictWasm) dirtied() *common.Address { return nil } + +func (ch EvictWasm) copy() journalEntry { + return EvictWasm{ + ModuleHash: ch.ModuleHash, + Version: ch.Version, + Tag: ch.Tag, + Debug: ch.Debug, + } +} diff --git a/core/state/state_object.go b/core/state/state_object.go index db3c32f2f2..d75ba01376 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -27,26 +27,14 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/holiman/uint256" ) -type Code []byte - -func (c Code) String() string { - return string(c) //strings.Join(Disassemble(c), " ") -} - type Storage map[common.Hash]common.Hash -func (s Storage) String() (str string) { - for key, value := range s { - str += fmt.Sprintf("%X : %X\n", key, value) - } - return -} - func (s Storage) Copy() Storage { return maps.Clone(s) } @@ -65,8 +53,8 @@ type stateObject struct { data types.StateAccount // Account data with all mutations applied in the scope of block // Write caches. 
- trie Trie // storage trie, which becomes non-nil on first access - code Code // contract bytecode, which gets set when code is loaded + trie Trie // storage trie, which becomes non-nil on first access + code []byte // contract bytecode, which gets set when code is loaded originStorage Storage // Storage cache of original entries to dedup rewrites pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block @@ -75,17 +63,16 @@ type stateObject struct { // Cache flags. dirtyCode bool // true if the code was updated - // Flag whether the account was marked as self-destructed. The self-destructed account - // is still accessible in the scope of same transaction. + // Flag whether the account was marked as self-destructed. The self-destructed + // account is still accessible in the scope of same transaction. selfDestructed bool - // Flag whether the account was marked as deleted. A self-destructed account - // or an account that is considered as empty will be marked as deleted at - // the end of transaction and no longer accessible anymore. - deleted bool - - // Flag whether the object was created in the current transaction - created bool + // This is an EIP-6780 flag indicating whether the object is eligible for + // self-destruct according to EIP-6780. The flag could be set either when + // the contract is just created within the current transaction, or when the + // object was previously existent and is being deployed as a contract within + // the current transaction. + newContract bool } // empty returns whether the account is considered empty. @@ -95,10 +82,7 @@ func (s *stateObject) empty() bool { // newObject creates a state object. func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *stateObject { - var ( - origin = acct - created = acct == nil // true if the account was not existent - ) + origin := acct if acct == nil { acct = types.NewEmptyStateAccount() } @@ -111,7 +95,6 @@ func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *s originStorage: make(Storage), pendingStorage: make(Storage), dirtyStorage: make(Storage), - created: created, } } @@ -158,13 +141,20 @@ func (s *stateObject) getTrie() (Trie, error) { // GetState retrieves a value from the account storage trie. func (s *stateObject) GetState(key common.Hash) common.Hash { + value, _ := s.getState(key) + return value +} + +// getState retrieves a value from the account storage trie and also returns if +// the slot is already dirty or not. +func (s *stateObject) getState(key common.Hash) (common.Hash, bool) { // If we have a dirty value for this state entry, return it value, dirty := s.dirtyStorage[key] if dirty { - return value + return value, true } // Otherwise return the entry's original value - return s.GetCommittedState(key) + return s.GetCommittedState(key), false } // GetCommittedState retrieves a value from the committed account storage trie. @@ -227,25 +217,38 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { // SetState updates a value in account storage. func (s *stateObject) SetState(key, value common.Hash) { - // If the new value is the same as old, don't set - prev := s.GetState(key) + // If the new value is the same as old, don't set. Otherwise, track only the + // dirty changes, supporting reverting all of it back to no change. 
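getState's second return value distinguishes a dirty-overlay hit from a committed read, which is what lets SetState journal a nil previous value on a slot's very first write. A simplified stand-in; slotStore and its string keys are illustrative only:

```go
package main

import "fmt"

// slotStore sketches the two-level lookup: the dirty overlay is consulted
// first and the boolean reports whether it hit.
type slotStore struct {
	committed map[string]string
	dirty     map[string]string
}

func (s *slotStore) getState(key string) (string, bool) {
	if v, ok := s.dirty[key]; ok {
		return v, true // already dirty within this transaction
	}
	return s.committed[key], false // fall back to the committed value
}

func main() {
	s := &slotStore{
		committed: map[string]string{"k": "old"},
		dirty:     map[string]string{},
	}
	v, dirty := s.getState("k")
	fmt.Println(v, dirty) // old false

	s.dirty["k"] = "new"
	v, dirty = s.getState("k")
	fmt.Println(v, dirty) // new true
}
```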
+    prev, dirty := s.getState(key)
     if prev == value {
         return
     }
+    var prevvalue *common.Hash
+    if dirty {
+        prevvalue = &prev
+    }
     // New value is different, update and journal the change
     s.db.journal.append(storageChange{
-        account:  &s.address,
-        key:      key,
-        prevalue: prev,
+        account:   &s.address,
+        key:       key,
+        prevvalue: prevvalue,
     })
     if s.db.logger != nil && s.db.logger.OnStorageChange != nil {
         s.db.logger.OnStorageChange(s.address, key, prev, value)
     }
-    s.setState(key, value)
+    s.setState(key, &value)
 }

-func (s *stateObject) setState(key, value common.Hash) {
-    s.dirtyStorage[key] = value
+// setState updates a value in account dirty storage. If the value being set is
+// nil (as happens on a journal revert), the dirtiness is removed.
+func (s *stateObject) setState(key common.Hash, value *common.Hash) {
+    // If the first set is being reverted, undo the dirty marker
+    if value == nil {
+        delete(s.dirtyStorage, key)
+        return
+    }
+    // Otherwise set the new value (or restore the previous one on revert)
+    s.dirtyStorage[key] = *value
 }

 // finalise moves all dirty storage slots into the pending area to be hashed or
@@ -253,9 +256,16 @@ func (s *stateObject) setState(key, value common.Hash) {
 func (s *stateObject) finalise(prefetch bool) {
     slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage))
     for key, value := range s.dirtyStorage {
-        s.pendingStorage[key] = value
+        // If the slot is different from its original value, move it into the
+        // pending area to be committed at the end of the block (and prefetch
+        // the pathways).
         if value != s.originStorage[key] {
+            s.pendingStorage[key] = value
             slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure
+        } else {
+            // Otherwise, the slot was reverted to its original value, remove it
+            // from the pending area to avoid thrashing the data structures.
+            delete(s.pendingStorage, key)
         }
     }
     if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash {
@@ -264,6 +274,10 @@ func (s *stateObject) finalise(prefetch bool) {
     if len(s.dirtyStorage) > 0 {
         s.dirtyStorage = make(Storage)
     }
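The pointer-typed prevvalue above is what lets a revert distinguish "slot was clean before this write" (nil) from "slot already held a dirty value" (non-nil). A minimal, self-contained model of that semantic, using stand-in types rather than the geth ones:

package main

import "fmt"

type slotChange struct {
    key  string
    prev *string // nil means the key was not dirty before this write
}

type store struct {
    dirty   map[string]string
    journal []slotChange
}

func (s *store) set(key, value string) {
    var prev *string
    if old, ok := s.dirty[key]; ok {
        prev = &old
    }
    s.journal = append(s.journal, slotChange{key: key, prev: prev})
    s.dirty[key] = value
}

func (s *store) revertLast() {
    ch := s.journal[len(s.journal)-1]
    s.journal = s.journal[:len(s.journal)-1]
    if ch.prev == nil {
        delete(s.dirty, ch.key) // undoing the first write removes the dirty marker entirely
    } else {
        s.dirty[ch.key] = *ch.prev
    }
}

func main() {
    s := &store{dirty: map[string]string{}}
    s.set("slot", "A") // first write in the transaction
    s.revertLast()
    fmt.Println(len(s.dirty)) // 0: the slot is no longer considered dirty
}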
+    // Revoke the flag at the end of the transaction. It finalizes the status
+    // of the newly-created object: it is no longer eligible for self-destruct
+    // under EIP-6780. For objects that are not newly created, this is a no-op.
+    s.newContract = false
 }

 // updateTrie is responsible for persisting cached storage changes into the
@@ -280,9 +294,6 @@ func (s *stateObject) updateTrie() (Trie, error) {
     if len(s.pendingStorage) == 0 {
         return s.trie, nil
     }
-    // Track the amount of time wasted on updating the storage trie
-    defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
-
     // The snapshot storage map for the object
     var (
         storage map[common.Hash][]byte
@@ -365,6 +376,11 @@ func (s *stateObject) updateTrie() (Trie, error) {
         }
         s.db.StorageDeleted += 1
     }
+    // If no slots were touched, log an error as we shouldn't have done all
+    // the above work in the first place
+    if len(usedStorage) == 0 {
+        log.Error("State object update was noop", "addr", s.address, "slots", len(s.pendingStorage))
+    }
     if s.db.prefetcher != nil {
         s.db.prefetcher.used(s.addrHash, s.data.Root, usedStorage)
     }
@@ -381,24 +397,21 @@ func (s *stateObject) updateRoot() {
     if err != nil || tr == nil {
         return
     }
-    // Track the amount of time wasted on hashing the storage trie
-    defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now())
-
     s.data.Root = tr.Hash()
 }

 // commit obtains a set of dirty storage trie nodes and updates the account data.
 // The returned set can be nil if nothing to commit. This function assumes all
 // storage mutations have already been flushed into trie by updateRoot.
+//
+// Note, commit may run concurrently across all the state objects. Do not assume
+// thread-safe access to the statedb.
 func (s *stateObject) commit() (*trienode.NodeSet, error) {
     // Short circuit if trie is not even loaded, don't bother with committing anything
     if s.trie == nil {
         s.origin = s.data.Copy()
         return nil, nil
     }
-    // Track the amount of time wasted on committing the storage trie
-    defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now())
-
     // The trie is currently in an open state and could potentially contain
     // cached mutations. Call commit to acquire a set of nodes that have been
     // modified, the set can be nil if nothing to commit.
@@ -453,22 +466,22 @@ func (s *stateObject) setBalance(amount *uint256.Int) {

 func (s *stateObject) deepCopy(db *StateDB) *stateObject {
     obj := &stateObject{
-        db:       db,
-        address:  s.address,
-        addrHash: s.addrHash,
-        origin:   s.origin,
-        data:     s.data,
+        db:             db,
+        address:        s.address,
+        addrHash:       s.addrHash,
+        origin:         s.origin,
+        data:           s.data,
+        code:           s.code,
+        originStorage:  s.originStorage.Copy(),
+        pendingStorage: s.pendingStorage.Copy(),
+        dirtyStorage:   s.dirtyStorage.Copy(),
+        dirtyCode:      s.dirtyCode,
+        selfDestructed: s.selfDestructed,
+        newContract:    s.newContract,
     }
     if s.trie != nil {
         obj.trie = db.db.CopyTrie(s.trie)
     }
-    obj.code = s.code
-    obj.dirtyStorage = s.dirtyStorage.Copy()
-    obj.originStorage = s.originStorage.Copy()
-    obj.pendingStorage = s.pendingStorage.Copy()
-    obj.selfDestructed = s.selfDestructed
-    obj.dirtyCode = s.dirtyCode
-    obj.deleted = s.deleted
     return obj
 }

@@ -483,7 +496,7 @@ func (s *stateObject) Address() common.Address {
 // Code returns the contract code associated with this object, if any.
 func (s *stateObject) Code() []byte {
-    if s.code != nil {
+    if len(s.code) != 0 {
         return s.code
     }
     if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
@@ -501,7 +514,7 @@ func (s *stateObject) Code() []byte {
 // or zero if none. This method is almost a mirror of Code, but uses a cache
 // inside the database to avoid loading codes seen recently.
 func (s *stateObject) CodeSize() int {
-    if s.code != nil {
+    if len(s.code) != 0 {
         return len(s.code)
     }
     if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) {
diff --git a/core/state/state_test.go b/core/state/state_test.go
index c6e6db906e..9200e4abe9 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -194,106 +194,20 @@ func TestSnapshotEmpty(t *testing.T) {
     s.state.RevertToSnapshot(s.state.Snapshot())
 }

-func TestSnapshot2(t *testing.T) {
+func TestCreateObjectRevert(t *testing.T) {
     state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+    addr := common.BytesToAddress([]byte("so0"))
+    snap := state.Snapshot()

-    stateobjaddr0 := common.BytesToAddress([]byte("so0"))
-    stateobjaddr1 := common.BytesToAddress([]byte("so1"))
-    var storageaddr common.Hash
-
-    data0 := common.BytesToHash([]byte{17})
-    data1 := common.BytesToHash([]byte{18})
-
-    state.SetState(stateobjaddr0, storageaddr, data0)
-    state.SetState(stateobjaddr1, storageaddr, data1)
-
-    // db, trie are already non-empty values
-    so0 := state.getStateObject(stateobjaddr0)
+    state.CreateAccount(addr)
+    so0 := state.getStateObject(addr)
     so0.SetBalance(uint256.NewInt(42), tracing.BalanceChangeUnspecified)
     so0.SetNonce(43)
     so0.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e'}), []byte{'c', 'a', 'f', 'e'})
-    so0.selfDestructed = false
-    so0.deleted = false
     state.setStateObject(so0)

-    root, _ := state.Commit(0, false)
-    state, _ = New(root, state.db, state.snaps)
-
-    // and one with deleted == true
-    so1 := state.getStateObject(stateobjaddr1)
-    so1.SetBalance(uint256.NewInt(52), tracing.BalanceChangeUnspecified)
-    so1.SetNonce(53)
-    so1.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e', '2'}), []byte{'c', 'a', 'f', 'e', '2'})
-    so1.selfDestructed = true
-    so1.deleted = true
-    state.setStateObject(so1)
-
-    so1 = state.getStateObject(stateobjaddr1)
-    if so1 != nil {
-        t.Fatalf("deleted object not nil when getting")
-    }
-
-    snapshot := state.Snapshot()
-    state.RevertToSnapshot(snapshot)
-
-    // Update lazily-loaded values before comparing.
-    so0Restored.GetState(storageaddr)
-    so0Restored.Code()
-    // non-deleted is equal (restored)
-    compareStateObjects(so0Restored, so0, t)
-
-    // deleted should be nil, both before and after restore of state copy
-    so1Restored := state.getStateObject(stateobjaddr1)
-    if so1Restored != nil {
-        t.Fatalf("deleted object not nil after restoring snapshot: %+v", so1Restored)
-    }
-}
-
-func compareStateObjects(so0, so1 *stateObject, t *testing.T) {
-    if so0.Address() != so1.Address() {
-        t.Fatalf("Address mismatch: have %v, want %v", so0.address, so1.address)
-    }
-    if so0.Balance().Cmp(so1.Balance()) != 0 {
-        t.Fatalf("Balance mismatch: have %v, want %v", so0.Balance(), so1.Balance())
-    }
-    if so0.Nonce() != so1.Nonce() {
-        t.Fatalf("Nonce mismatch: have %v, want %v", so0.Nonce(), so1.Nonce())
-    }
-    if so0.data.Root != so1.data.Root {
-        t.Errorf("Root mismatch: have %x, want %x", so0.data.Root[:], so1.data.Root[:])
-    }
-    if !bytes.Equal(so0.CodeHash(), so1.CodeHash()) {
-        t.Fatalf("CodeHash mismatch: have %v, want %v", so0.CodeHash(), so1.CodeHash())
-    }
-    if !bytes.Equal(so0.code, so1.code) {
-        t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code)
-    }
-
-    if len(so1.dirtyStorage) != len(so0.dirtyStorage) {
-        t.Errorf("Dirty storage size mismatch: have %d, want %d", len(so1.dirtyStorage), len(so0.dirtyStorage))
-    }
-    for k, v := range so1.dirtyStorage {
-        if so0.dirtyStorage[k] != v {
-            t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, so0.dirtyStorage[k], v)
-        }
-    }
-    for k, v := range so0.dirtyStorage {
-        if so1.dirtyStorage[k] != v {
-            t.Errorf("Dirty storage key %x mismatch: have %v, want none.", k, v)
-        }
-    }
-    if len(so1.originStorage) != len(so0.originStorage) {
-        t.Errorf("Origin storage size mismatch: have %d, want %d", len(so1.originStorage), len(so0.originStorage))
-    }
-    for k, v := range so1.originStorage {
-        if so0.originStorage[k] != v {
-            t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, so0.originStorage[k], v)
-        }
-    }
-    for k, v := range so0.originStorage {
-        if so1.originStorage[k] != v {
-            t.Errorf("Origin storage key %x mismatch: have %v, want none.", k, v)
-        }
+    state.RevertToSnapshot(snap)
+    if state.Exist(addr) {
+        t.Error("Unexpected account after revert")
     }
 }
diff --git a/core/state/statedb.go b/core/state/statedb.go
index d1aaaf591a..5cf8e066dc 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -24,6 +24,7 @@ import (
     "math/big"
     "slices"
     "sort"
+    "sync"
     "time"

     "github.com/ethereum/go-ethereum/common"
@@ -38,8 +39,12 @@ import (
     "github.com/ethereum/go-ethereum/trie/trienode"
     "github.com/ethereum/go-ethereum/trie/triestate"
     "github.com/holiman/uint256"
+    "golang.org/x/sync/errgroup"
 )

+// TriesInMemory represents the number of layers that are kept in RAM.
+const TriesInMemory = 128
+
 type revision struct {
     id           int
     journalIndex int
@@ -48,6 +53,26 @@ type revision struct {
     unexpectedBalanceDelta *big.Int
 }

+type mutationType int
+
+const (
+    update mutationType = iota
+    deletion
+)
+
+type mutation struct {
+    typ     mutationType
+    applied bool
+}
+
+func (m *mutation) copy() *mutation {
+    return &mutation{typ: m.typ, applied: m.applied}
+}
+
+func (m *mutation) isDelete() bool {
+    return m.typ == deletion
+}
+
 // StateDB structs within the ethereum protocol are used to store anything
 // within the merkle trie. StateDBs take care of caching and storing
 // nested states. It's the general query interface to retrieve:
@@ -81,12 +106,22 @@ type StateDB struct {
     accountsOrigin map[common.Address][]byte                 // The original value of mutated accounts in 'slim RLP' encoding
     storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format

-    // This map holds 'live' objects, which will get modified while processing
-    // a state transition.
-    stateObjects         map[common.Address]*stateObject
-    stateObjectsPending  map[common.Address]struct{}            // State objects finalized but not yet written to the trie
-    stateObjectsDirty    map[common.Address]struct{}            // State objects modified in the current execution
-    stateObjectsDestruct map[common.Address]*types.StateAccount // State objects destructed in the block along with its previous value
+    // This map holds 'live' objects, which will get modified while
+    // processing a state transition.
+    stateObjects map[common.Address]*stateObject
+
+    // This map holds 'deleted' objects. An object with the same address
+    // might also occur in the 'stateObjects' map due to account
+    // resurrection. The account value is tracked as the original value
+    // before the transition. This map is populated at the transaction
+    // boundaries.
+    stateObjectsDestruct map[common.Address]*types.StateAccount
+
+    // This map tracks the account mutations that occurred during the
+    // transition. Uncommitted mutations belonging to the same account
+    // can be merged into a single one, which is equivalent from the
+    // database's perspective. This map is populated at the transaction
+    // boundaries.
+    mutations map[common.Address]*mutation

     // DB error.
     // State objects are used by the consensus core and VM which are
@@ -127,7 +162,6 @@ type StateDB struct {
     AccountUpdates       time.Duration
     AccountCommits       time.Duration
     StorageReads         time.Duration
-    StorageHashes        time.Duration
     StorageUpdates       time.Duration
     StorageCommits       time.Duration
     SnapshotAccountReads time.Duration
@@ -170,9 +204,8 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
         accountsOrigin:       make(map[common.Address][]byte),
         storagesOrigin:       make(map[common.Address]map[common.Hash][]byte),
         stateObjects:         make(map[common.Address]*stateObject),
-        stateObjectsPending:  make(map[common.Address]struct{}),
-        stateObjectsDirty:    make(map[common.Address]struct{}),
         stateObjectsDestruct: make(map[common.Address]*types.StateAccount),
+        mutations:            make(map[common.Address]*mutation),
         logs:                 make(map[common.Hash][]*types.Log),
         preimages:            make(map[common.Hash][]byte),
         journal:              newJournal(),
@@ -505,8 +538,7 @@ func (s *StateDB) Selfdestruct6780(addr common.Address) {
     if stateObject == nil {
         return
     }
-
-    if stateObject.created {
+    if stateObject.newContract {
         s.SelfDestruct(addr)
     }
 }
@@ -585,24 +617,16 @@ func (s *StateDB) deleteStateObject(addr common.Address) {
 }

 // getStateObject retrieves a state object given by the address, returning nil if
-// the object is not found or was deleted in this execution context. If you need
-// to differentiate between non-existent/just-deleted, use getDeletedStateObject.
+// the object is not found or was deleted in this execution context.
 func (s *StateDB) getStateObject(addr common.Address) *stateObject {
-    if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted {
-        return obj
-    }
-    return nil
-}
-
-// getDeletedStateObject is similar to getStateObject, but instead of returning
-// nil for a deleted state object, it returns the actual object with the deleted
-// flag set. This is needed by the state journal to revert to the correct self-
-// destructed object instead of wiping all knowledge about the state object.
-func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
     // Prefer live objects if any is available
     if obj := s.stateObjects[addr]; obj != nil {
         return obj
     }
+    // Short circuit if the account is already destructed in this block.
+    if _, ok := s.stateObjectsDestruct[addr]; ok {
+        return nil
+    }
     // If no live objects are available, attempt to use snapshots
     var data *types.StateAccount
     if s.snap != nil {
@@ -655,68 +679,40 @@ func (s *StateDB) setStateObject(object *stateObject) {

 // getOrNewStateObject retrieves a state object or create a new state object if nil.
 func (s *StateDB) getOrNewStateObject(addr common.Address) *stateObject {
-    stateObject := s.getStateObject(addr)
-    if stateObject == nil {
-        stateObject, _ = s.createObject(addr)
+    obj := s.getStateObject(addr)
+    if obj == nil {
+        obj = s.createObject(addr)
     }
-    return stateObject
+    return obj
 }

-// createObject creates a new state object. If there is an existing account with
-// the given address, it is overwritten and returned as the second return value.
-func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
-    prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
-    newobj = newObject(s, addr, nil)
-    if prev == nil {
-        s.journal.append(createObjectChange{account: &addr})
-    } else {
-        // The original account should be marked as destructed and all cached
-        // account and storage data should be cleared as well. Note, it must
-        // be done here, otherwise the destruction event of "original account"
-        // will be lost.
-        _, prevdestruct := s.stateObjectsDestruct[prev.address]
-        if !prevdestruct {
-            s.stateObjectsDestruct[prev.address] = prev.origin
-        }
-        // There may be some cached account/storage data already since IntermediateRoot
-        // will be called for each transaction before byzantium fork which will always
-        // cache the latest account/storage data.
-        prevAccount, ok := s.accountsOrigin[prev.address]
-        s.journal.append(resetObjectChange{
-            prev:                   prev,
-            prevdestruct:           prevdestruct,
-            prevAccount:            s.accounts[prev.addrHash],
-            prevStorage:            s.storages[prev.addrHash],
-            prevAccountOriginExist: ok,
-            prevAccountOrigin:      prevAccount,
-            prevStorageOrigin:      s.storagesOrigin[prev.address],
-        })
-        delete(s.accounts, prev.addrHash)
-        delete(s.storages, prev.addrHash)
-        delete(s.accountsOrigin, prev.address)
-        delete(s.storagesOrigin, prev.address)
-    }
-    s.setStateObject(newobj)
-    if prev != nil && !prev.deleted {
-        return newobj, prev
-    }
-    return newobj, nil
+// createObject creates a new state object. It is assumed that there is no
+// existing account with the given address; otherwise it will be silently overwritten.
+func (s *StateDB) createObject(addr common.Address) *stateObject {
+    obj := newObject(s, addr, nil)
+    s.journal.append(createObjectChange{account: &addr})
+    s.setStateObject(obj)
+    return obj
 }

-// CreateAccount explicitly creates a state object. If a state object with the address
-// already exists the balance is carried over to the new account.
-//
-// CreateAccount is called during the EVM CREATE operation. The situation might arise that
-// a contract does the following:
-//
-//  1. sends funds to sha(account ++ (nonce + 1))
-//  2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
-//
-// Carrying over the balance ensures that Ether doesn't disappear.
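With resurrection handling gone from createObject, callers are expected to guard account creation themselves and to flag contract deployments separately. A sketch of the assumed calling pattern, mirroring the guarded CreateAccount added to the tests further down (illustrative, not a quote of the actual EVM code; imports of common and core/state assumed):

func deployContractAccount(db *state.StateDB, addr common.Address) {
    // Create the account only if it does not exist yet; it may already hold
    // a balance from funds sent ahead of the deployment.
    if !db.Exist(addr) {
        db.CreateAccount(addr)
    }
    // Mark it as a contract created in this transaction; this sets the
    // newContract flag that Selfdestruct6780 consults for EIP-6780.
    db.CreateContract(addr)
}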
+// CreateAccount explicitly creates a new state object, assuming that the
+// account did not previously exist in the state. If the account already
+// exists, this function will silently overwrite it, which might eventually
+// lead to a consensus bug.
 func (s *StateDB) CreateAccount(addr common.Address) {
-    newObj, prev := s.createObject(addr)
-    if prev != nil {
-        newObj.setBalance(prev.data.Balance)
+    s.createObject(addr)
+}
+
+// CreateContract is used whenever a contract is created. This may be preceded
+// by CreateAccount, but that is not required if it already existed in the
+// state due to funds sent beforehand.
+// This operation sets the 'newContract'-flag, which is required in order to
+// correctly handle EIP-6780 'delete-in-same-transaction' logic.
+func (s *StateDB) CreateContract(addr common.Address) {
+    obj := s.getStateObject(addr)
+    if !obj.newContract {
+        obj.newContract = true
+        s.journal.append(createContractChange{account: addr})
     }
 }

@@ -735,21 +731,25 @@ func (s *StateDB) Copy() *StateDB {
         db:           s.db,
         trie:         s.db.CopyTrie(s.trie),
+        hasher:       crypto.NewKeccakState(),
         originalRoot: s.originalRoot,

         accounts:       copySet(s.accounts),
         storages:       copy2DSet(s.storages),
         accountsOrigin: copySet(s.accountsOrigin),
         storagesOrigin: copy2DSet(s.storagesOrigin),

-        stateObjects:         make(map[common.Address]*stateObject, len(s.journal.dirties)),
-        stateObjectsPending:  make(map[common.Address]struct{}, len(s.stateObjectsPending)),
-        stateObjectsDirty:    make(map[common.Address]struct{}, len(s.journal.dirties)),
+        stateObjects:         make(map[common.Address]*stateObject, len(s.stateObjects)),
         stateObjectsDestruct: maps.Clone(s.stateObjectsDestruct),
+        mutations:            make(map[common.Address]*mutation, len(s.mutations)),
+
+        dbErr:     s.dbErr,
         refund:    s.refund,
+        thash:     s.thash,
+        txIndex:   s.txIndex,
         logs:      make(map[common.Hash][]*types.Log, len(s.logs)),
         logSize:   s.logSize,
         preimages: maps.Clone(s.preimages),

-        journal: newJournal(),
-        hasher:  crypto.NewKeccakState(),
+        journal:        s.journal.copy(),
+        validRevisions: slices.Clone(s.validRevisions),
+        nextRevisionId: s.nextRevisionId,

         // In order for the block producer to be able to use and make additions
         // to the snapshot tree, we need to copy that as well. Otherwise, any
@@ -758,39 +758,14 @@ func (s *StateDB) Copy() *StateDB {
         snaps: s.snaps,
         snap:  s.snap,
     }
-    // Copy the dirty states, logs, and preimages
-    for addr := range s.journal.dirties {
-        // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527),
-        // and in the Finalise-method, there is a case where an object is in the journal but not
-        // in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for
-        // nil
-        if object, exist := s.stateObjects[addr]; exist {
-            // Even though the original object is dirty, we are not copying the journal,
-            // so we need to make sure that any side-effect the journal would have caused
-            // during a commit (or similar op) is already applied to the copy.
-            state.stateObjects[addr] = object.deepCopy(state)
-
-            state.stateObjectsDirty[addr] = struct{}{}   // Mark the copy dirty to force internal (code/state) commits
-            state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits
-        }
-    }
-    // Above, we don't copy the actual journal. This means that if the copy
-    // is copied, the loop above will be a no-op, since the copy's journal
-    // is empty. Thus, here we iterate over stateObjects, to enable copies
-    // of copies.
-    for addr := range s.stateObjectsPending {
-        if _, exist := state.stateObjects[addr]; !exist {
-            state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state)
-        }
-        state.stateObjectsPending[addr] = struct{}{}
+    // Deep copy cached state objects.
+    for addr, obj := range s.stateObjects {
+        state.stateObjects[addr] = obj.deepCopy(state)
     }
-    for addr := range s.stateObjectsDirty {
-        if _, exist := state.stateObjects[addr]; !exist {
-            state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state)
-        }
-        state.stateObjectsDirty[addr] = struct{}{}
+    // Deep copy the object state markers.
+    for addr, op := range s.mutations {
+        state.mutations[addr] = op.copy()
     }
-
     // Deep copy the logs occurred in the scope of block
     for hash, logs := range s.logs {
         cpy := make([]*types.Log, len(logs))
@@ -879,7 +854,8 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
             continue
         }
         if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) {
-            obj.deleted = true
+            delete(s.stateObjects, obj.address)
+            s.markDelete(addr)

             // If ether was sent to the account post-selfdestruct, it is burnt.
             if bal := obj.Balance(); s.logger != nil && s.logger.OnBalanceChange != nil && obj.selfDestructed && bal.Sign() != 0 {
@@ -900,11 +876,8 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
             delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect)
         } else {
             obj.finalise(true) // Prefetch slots in the background
+            s.markUpdate(addr)
         }
-        obj.created = false
-        s.stateObjectsPending[addr] = struct{}{}
-        s.stateObjectsDirty[addr] = struct{}{}
-
         // At this point, also ship the address off to the precacher. The precacher
         // will start loading tries, and when the change is eventually committed,
         // the commit-phase will be a lot faster
@@ -943,24 +916,31 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
     // the account prefetcher. Instead, let's process all the storage updates
     // first, giving the account prefetches just a few more milliseconds of time
     // to pull useful data from disk.
+    start := time.Now()
     if s.deterministic {
-        addressesToUpdate := make([]common.Address, 0, len(s.stateObjectsPending))
-        for addr := range s.stateObjectsPending {
+        addressesToUpdate := make([]common.Address, 0, len(s.mutations))
+        for addr := range s.mutations {
             addressesToUpdate = append(addressesToUpdate, addr)
         }
         sort.Slice(addressesToUpdate, func(i, j int) bool { return bytes.Compare(addressesToUpdate[i][:], addressesToUpdate[j][:]) < 0 })
         for _, addr := range addressesToUpdate {
-            if obj := s.stateObjects[addr]; !obj.deleted {
-                obj.updateRoot()
+            if obj := s.mutations[addr]; !obj.applied && !obj.isDelete() {
+                s.stateObjects[addr].updateRoot()
             }
         }
     } else {
-        for addr := range s.stateObjectsPending {
-            if obj := s.stateObjects[addr]; !obj.deleted {
-                obj.updateRoot()
+        for addr, op := range s.mutations {
+            if op.applied {
+                continue
+            }
+            if op.isDelete() {
+                continue
             }
+            s.stateObjects[addr].updateRoot()
         }
     }
+    s.StorageUpdates += time.Since(start)
+
     // Now we're about to start to write changes to the trie. The trie is so far
     // _untouched_. We can check with the prefetcher, if it can give us a trie
     // which has the same root, but also has some content loaded into it.
@@ -969,7 +949,6 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
             s.trie = trie
         }
     }
-    usedAddrs := make([][]byte, 0, len(s.stateObjectsPending))

     // Perform updates before deletions. This prevents resolution of unnecessary trie nodes
 // in circumstances similar to the following:
 //
@@ -980,13 +959,21 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
     // If the self-destruct is handled first, then `P` would be left with only one child, thus collapsed
     // into a shortnode. This requires `B` to be resolved from disk.
     // Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
-    var deletedAddrs []common.Address
-    for addr := range s.stateObjectsPending {
-        if obj := s.stateObjects[addr]; !obj.deleted {
-            s.updateStateObject(obj)
-            s.AccountUpdated += 1
+    var (
+        usedAddrs    [][]byte
+        deletedAddrs []common.Address
+    )
+    for addr, op := range s.mutations {
+        if op.applied {
+            continue
+        }
+        op.applied = true
+
+        if op.isDelete() {
+            deletedAddrs = append(deletedAddrs, addr)
         } else {
-            deletedAddrs = append(deletedAddrs, obj.address)
+            s.updateStateObject(s.stateObjects[addr])
+            s.AccountUpdated += 1
         }
         usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure
     }
@@ -997,9 +984,6 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
     if prefetcher != nil {
         prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs)
     }
-    if len(s.stateObjectsPending) > 0 {
-        s.stateObjectsPending = make(map[common.Address]struct{})
-    }
     // Track the amount of time wasted on hashing the account trie
     defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
@@ -1240,41 +1224,105 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
         storageTrieNodesUpdated int
         storageTrieNodesDeleted int
         nodes                   = trienode.NewMergedNodeSet()
-        codeWriter              = s.db.DiskDB().NewBatch()
         wasmCodeWriter          = s.db.WasmStore().NewBatch()
     )
     // Handle all state deletions first
     if err := s.handleDestruction(nodes); err != nil {
         return common.Hash{}, err
     }
-    // Handle all state updates afterwards
-    for addr := range s.stateObjectsDirty {
-        obj := s.stateObjects[addr]
-        if obj.deleted {
+    // Handle all state updates afterwards, concurrently to one another to shave
+    // off some milliseconds from the commit operation. Also accumulate the code
+    // writes to run in parallel with the computations.
+    start := time.Now()
+    var (
+        code    = s.db.DiskDB().NewBatch()
+        lock    sync.Mutex
+        root    common.Hash
+        workers errgroup.Group
+    )
+    // Schedule the account trie first since that will be the biggest, so give
+    // it the most time to crunch.
+    //
+    // TODO(karalabe): This account trie commit is *very* heavy. 5-6ms at chain
+    // heads, which seems excessive given that it doesn't do hashing, it just
+    // shuffles some data. For comparison, the *hashing* at chain head is 2-3ms.
+    // We need to investigate what's happening as it seems something's wonky.
+    // Obviously it's not an end of the world issue, just something the original
+    // code didn't anticipate.
+    workers.Go(func() error {
+        // Write the account trie changes, measuring the amount of wasted time
+        newroot, set, err := s.trie.Commit(true)
+        if err != nil {
+            return err
+        }
+        root = newroot
+
+        // Merge the dirty nodes of account trie into global set
+        lock.Lock()
+        defer lock.Unlock()
+
+        if set != nil {
+            if err = nodes.Merge(set); err != nil {
+                return err
+            }
+            accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size()
+        }
+        s.AccountCommits = time.Since(start)
+        return nil
+    })
+    // Schedule each of the storage tries that need to be updated, so they can
+    // run concurrently to one another.
+    //
+    // TODO(karalabe): Experimentally, the account commit takes approximately the
+    // same time as all the storage commits combined, so we could maybe only have
+    // 2 threads in total. But that kind of depends on the account commit being
+    // more expensive than it should be, so let's fix that and revisit this todo.
+    for addr, op := range s.mutations {
+        if op.isDelete() {
             continue
         }
         // Write any contract code associated with the state object
+        obj := s.stateObjects[addr]
         if obj.code != nil && obj.dirtyCode {
-            rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
+            rawdb.WriteCode(code, common.BytesToHash(obj.CodeHash()), obj.code)
             obj.dirtyCode = false
         }
-        // Write any storage changes in the state object to its storage trie
-        set, err := obj.commit()
-        if err != nil {
-            return common.Hash{}, err
-        }
-        // Merge the dirty nodes of storage trie into global set. It is possible
-        // that the account was destructed and then resurrected in the same block.
-        // In this case, the node set is shared by both accounts.
-        if set != nil {
-            if err := nodes.Merge(set); err != nil {
-                return common.Hash{}, err
+        // Run the storage updates concurrently to one another
+        workers.Go(func() error {
+            // Write any storage changes in the state object to its storage trie
+            set, err := obj.commit()
+            if err != nil {
+                return err
             }
-            updates, deleted := set.Size()
-            storageTrieNodesUpdated += updates
-            storageTrieNodesDeleted += deleted
-        }
+            // Merge the dirty nodes of storage trie into global set. It is possible
+            // that the account was destructed and then resurrected in the same block.
+            // In this case, the node set is shared by both accounts.
+            lock.Lock()
+            defer lock.Unlock()
+
+            if set != nil {
+                if err = nodes.Merge(set); err != nil {
+                    return err
+                }
+                updates, deleted := set.Size()
+                storageTrieNodesUpdated += updates
+                storageTrieNodesDeleted += deleted
+            }
+            s.StorageCommits = time.Since(start) // overwrite with the longest storage commit runtime
+            return nil
+        })
     }
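The commit concurrency above reduces to a fan-out/fan-in: one errgroup runs the account-trie commit, every storage-trie commit and the code write, with a mutex guarding the shared merge target. A runnable model of the same pattern with stand-in names:

package main

import (
    "fmt"
    "sync"

    "golang.org/x/sync/errgroup"
)

func main() {
    var (
        workers errgroup.Group
        lock    sync.Mutex
        merged  []string // stands in for the shared trienode.MergedNodeSet
    )
    for _, name := range []string{"accounts", "storage-0xaa", "storage-0xbb", "code"} {
        name := name // capture per iteration (needed below Go 1.22)
        workers.Go(func() error {
            // ...expensive per-trie commit work would happen here...
            lock.Lock()
            defer lock.Unlock()
            merged = append(merged, name) // merge results only under the lock
            return nil
        })
    }
    if err := workers.Wait(); err != nil { // first non-nil error wins, as in Commit
        fmt.Println("commit failed:", err)
        return
    }
    fmt.Println(len(merged), "result sets merged")
}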
+    // Schedule the code commits to run concurrently too. This shouldn't really
+    // take much since we don't often commit code, but since it's disk access,
+    // it's always yolo.
+    workers.Go(func() error {
+        if code.ValueSize() > 0 {
+            if err := code.Write(); err != nil {
+                log.Crit("Failed to commit dirty codes", "error", err)
+            }
+        }
+        return nil
+    })

     // Arbitrum: write Stylus programs to disk
     for moduleHash, info := range s.arbExtraData.activatedWasms {
@@ -1284,33 +1332,15 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
         s.arbExtraData.activatedWasms = make(map[common.Hash]*ActivatedWasm)
     }

-    if codeWriter.ValueSize() > 0 {
-        if err := codeWriter.Write(); err != nil {
-            log.Crit("Failed to commit dirty codes", "error", err)
-        }
-    }
     if wasmCodeWriter.ValueSize() > 0 {
         if err := wasmCodeWriter.Write(); err != nil {
             log.Crit("Failed to commit dirty stylus codes", "error", err)
         }
     }
-    // Write the account trie changes, measuring the amount of wasted time
-    start := time.Now()
-
-    root, set, err := s.trie.Commit(true)
-    if err != nil {
+    // Wait for everything to finish and update the metrics
+    if err := workers.Wait(); err != nil {
         return common.Hash{}, err
     }
-    // Merge the dirty nodes of account trie into global set
-    if set != nil {
-        if err := nodes.Merge(set); err != nil {
-            return common.Hash{}, err
-        }
-        accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size()
-    }
-    // Report the commit metrics
-    s.AccountCommits += time.Since(start)
-
     accountUpdatedMeter.Mark(int64(s.AccountUpdated))
     storageUpdatedMeter.Mark(int64(s.StorageUpdated))
     accountDeletedMeter.Mark(int64(s.AccountDeleted))
@@ -1330,12 +1360,12 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
         if err := s.snaps.Update(root, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil {
             log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
         }
-        // Keep 128 diff layers in the memory, persistent layer is 129th.
+        // Keep TriesInMemory diff layers in memory; the persistent layer is the
+        // (TriesInMemory+1)-th.
         // - head layer is paired with HEAD state
         // - head-1 layer is paired with HEAD-1 state
         // - head-127 layer (bottom-most diff layer) is paired with HEAD-127 state
-        if err := s.snaps.Cap(root, 128); err != nil {
-            log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err)
+        if err := s.snaps.Cap(root, TriesInMemory); err != nil {
+            log.Warn("Failed to cap snapshot tree", "root", root, "layers", TriesInMemory, "err", err)
         }
     }
     s.SnapshotCommits += time.Since(start)
@@ -1369,7 +1399,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
     s.storages = make(map[common.Hash]map[common.Hash][]byte)
     s.accountsOrigin = make(map[common.Address][]byte)
     s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte)
-    s.stateObjectsDirty = make(map[common.Address]struct{})
+    s.mutations = make(map[common.Address]*mutation)
     s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount)
     return root, nil
 }
@@ -1484,3 +1514,19 @@ func copy2DSet[k comparable](set map[k]map[common.Hash][]byte) map[k]map[common.
     }
     return copied
 }
+
+func (s *StateDB) markDelete(addr common.Address) {
+    if _, ok := s.mutations[addr]; !ok {
+        s.mutations[addr] = &mutation{}
+    }
+    s.mutations[addr].applied = false
+    s.mutations[addr].typ = deletion
+}
+
+func (s *StateDB) markUpdate(addr common.Address) {
+    if _, ok := s.mutations[addr]; !ok {
+        s.mutations[addr] = &mutation{}
+    }
+    s.mutations[addr].applied = false
+    s.mutations[addr].typ = update
+}
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
index 7594246ded..ca676160e0 100644
--- a/core/state/statedb_fuzz_test.go
+++ b/core/state/statedb_fuzz_test.go
@@ -96,7 +96,9 @@ func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction
     {
         name: "CreateAccount",
         fn: func(a testAction, s *StateDB) {
-            s.CreateAccount(addr)
+            if !s.Exist(addr) {
+                s.CreateAccount(addr)
+            }
         },
     },
     {
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 0757a5d720..e32257abf2 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -21,9 +21,11 @@ import (
     "encoding/binary"
     "errors"
     "fmt"
+    "maps"
     "math"
     "math/rand"
     "reflect"
+    "slices"
     "strings"
     "sync"
     "testing"
@@ -224,6 +226,78 @@ func TestCopy(t *testing.T) {
     }
 }

+// TestCopyWithDirtyJournal tests whether Copy can correctly create an equal
+// copy of a stateDB with a dirty journal present.
+func TestCopyWithDirtyJournal(t *testing.T) {
+    db := NewDatabase(rawdb.NewMemoryDatabase())
+    orig, _ := New(types.EmptyRootHash, db, nil)
+
+    // Fill up the initial states
+    for i := byte(0); i < 255; i++ {
+        obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
+        obj.AddBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified)
+        obj.data.Root = common.HexToHash("0xdeadbeef")
+        orig.updateStateObject(obj)
+    }
+    root, _ := orig.Commit(0, true)
+    orig, _ = New(root, db, nil)
+
+    // modify all in memory without finalizing
+    for i := byte(0); i < 255; i++ {
+        obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
+        obj.SubBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified)
+        orig.updateStateObject(obj)
+    }
+    cpy := orig.Copy()
+
+    orig.Finalise(true)
+    for i := byte(0); i < 255; i++ {
+        root := orig.GetStorageRoot(common.BytesToAddress([]byte{i}))
+        if root != (common.Hash{}) {
+            t.Errorf("Unexpected storage root %x", root)
+        }
+    }
+    cpy.Finalise(true)
+    for i := byte(0); i < 255; i++ {
+        root := cpy.GetStorageRoot(common.BytesToAddress([]byte{i}))
+        if root != (common.Hash{}) {
+            t.Errorf("Unexpected storage root %x", root)
+        }
+    }
+    if cpy.IntermediateRoot(true) != orig.IntermediateRoot(true) {
+        t.Error("State is not equal after copy")
+    }
+}
+
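markUpdate and markDelete collapse any number of per-transaction touches into one net marker per account: only the last mutation type survives, and applied is re-armed on every change so IntermediateRoot knows to push it into the trie. A tiny standalone model of that merging behaviour:

package main

import "fmt"

type mutationType int

const (
    update mutationType = iota
    deletion
)

type mutation struct {
    typ     mutationType
    applied bool
}

func mark(muts map[string]*mutation, addr string, typ mutationType) {
    if _, ok := muts[addr]; !ok {
        muts[addr] = &mutation{}
    }
    muts[addr].applied = false // a fresh change must be (re)applied to the trie
    muts[addr].typ = typ
}

func main() {
    muts := map[string]*mutation{}
    mark(muts, "0xaa", update)   // tx 1 touches the account
    mark(muts, "0xaa", deletion) // tx 2 self-destructs it
    fmt.Println(muts["0xaa"].typ == deletion) // true: only the net effect remains
}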
+// TestCopyObjectState creates an original state, S1, and makes a copy S2.
+// It then proceeds to make changes to S1. Those changes are _not_ supposed
+// to affect S2. This test checks that the copy properly deep-copies the
+// object state.
+func TestCopyObjectState(t *testing.T) {
+    db := NewDatabase(rawdb.NewMemoryDatabase())
+    orig, _ := New(types.EmptyRootHash, db, nil)
+
+    // Fill up the initial states
+    for i := byte(0); i < 5; i++ {
+        obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
+        obj.AddBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified)
+        obj.data.Root = common.HexToHash("0xdeadbeef")
+        orig.updateStateObject(obj)
+    }
+    orig.Finalise(true)
+    cpy := orig.Copy()
+    for _, op := range cpy.mutations {
+        if have, want := op.applied, false; have != want {
+            t.Fatalf("Error in test itself, the 'done' flag should not be set before Commit, have %v want %v", have, want)
+        }
+    }
+    orig.Commit(0, true)
+    for _, op := range cpy.mutations {
+        if have, want := op.applied, false; have != want {
+            t.Fatalf("Error: original state affected copy, have %v want %v", have, want)
+        }
+    }
+}
+
 func TestSnapshotRandom(t *testing.T) {
     config := &quick.Config{MaxCount: 1000}
     err := quick.Check((*snapshotTest).run, config)
@@ -307,7 +381,30 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
     {
         name: "CreateAccount",
         fn: func(a testAction, s *StateDB) {
-            s.CreateAccount(addr)
+            if !s.Exist(addr) {
+                s.CreateAccount(addr)
+            }
         },
     },
+    {
+        name: "CreateContract",
+        fn: func(a testAction, s *StateDB) {
+            if !s.Exist(addr) {
+                s.CreateAccount(addr)
+            }
+            contractHash := s.GetCodeHash(addr)
+            emptyCode := contractHash == (common.Hash{}) || contractHash == types.EmptyCodeHash
+            storageRoot := s.GetStorageRoot(addr)
+            emptyStorage := storageRoot == (common.Hash{}) || storageRoot == types.EmptyRootHash
+            if s.GetNonce(addr) == 0 && emptyCode && emptyStorage {
+                s.CreateContract(addr)
+                // We also set some code here, to prevent the
+                // CreateContract action from being performed twice in a row,
+                // which would cause a difference in state when unrolling
+                // the journal. (CreateContract assumes the newContract flag
+                // was false prior to invocation, and the journal rollback
+                // sets it to false.)
+                s.SetCode(addr, []byte{1})
+            }
+        },
+    },
     {
@@ -467,6 +564,10 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
         checkeq("GetCode", state.GetCode(addr), checkstate.GetCode(addr))
         checkeq("GetCodeHash", state.GetCodeHash(addr), checkstate.GetCodeHash(addr))
         checkeq("GetCodeSize", state.GetCodeSize(addr), checkstate.GetCodeSize(addr))
+        // Check newContract-flag
+        if obj := state.getStateObject(addr); obj != nil {
+            checkeq("IsNewContract", obj.newContract, checkstate.getStateObject(addr).newContract)
+        }
         // Check storage.
         if obj := state.getStateObject(addr); obj != nil {
             forEachStorage(state, addr, func(key, value common.Hash) bool {
@@ -475,12 +576,49 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
             forEachStorage(checkstate, addr, func(key, value common.Hash) bool {
                 return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value)
             })
+            other := checkstate.getStateObject(addr)
+            // Check dirty storage which is not in trie
+            if !maps.Equal(obj.dirtyStorage, other.dirtyStorage) {
+                print := func(dirty map[common.Hash]common.Hash) string {
+                    var keys []common.Hash
+                    out := new(strings.Builder)
+                    for key := range dirty {
+                        keys = append(keys, key)
+                    }
+                    slices.SortFunc(keys, common.Hash.Cmp)
+                    for i, key := range keys {
+                        fmt.Fprintf(out, " %d. %v %v\n", i, key, dirty[key])
+                    }
+                    return out.String()
+                }
+                return fmt.Errorf("dirty storage mismatch, have\n%v\nwant\n%v",
+                    print(obj.dirtyStorage),
+                    print(other.dirtyStorage))
+            }
+        }
+        // Check transient storage.
+        {
+            have := state.transientStorage
+            want := checkstate.transientStorage
+            eq := maps.EqualFunc(have, want,
+                func(a Storage, b Storage) bool {
+                    return maps.Equal(a, b)
+                })
+            if !eq {
+                return fmt.Errorf("transient storage differs, have\n%v\nwant\n%v",
+                    have.PrettyPrint(),
+                    want.PrettyPrint())
+            }
+        }
         if err != nil {
             return err
         }
     }
-
+    if !checkstate.accessList.Equal(state.accessList) { // Check access lists
+        return fmt.Errorf("AccessLists are wrong, have\n%v\nwant\n%v",
+            checkstate.accessList.PrettyPrint(),
+            state.accessList.PrettyPrint())
+    }
     if state.GetRefund() != checkstate.GetRefund() {
         return fmt.Errorf("got GetRefund() == %d, want GetRefund() == %d", state.GetRefund(), checkstate.GetRefund())
@@ -489,6 +627,23 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
         return fmt.Errorf("got GetLogs(common.Hash{}) == %v, want GetLogs(common.Hash{}) == %v",
             state.GetLogs(common.Hash{}, 0, common.Hash{}), checkstate.GetLogs(common.Hash{}, 0, common.Hash{}))
     }
+    if !maps.Equal(state.journal.dirties, checkstate.journal.dirties) {
+        getKeys := func(dirty map[common.Address]int) string {
+            var keys []common.Address
+            out := new(strings.Builder)
+            for key := range dirty {
+                keys = append(keys, key)
+            }
+            slices.SortFunc(keys, common.Address.Cmp)
+            for i, key := range keys {
+                fmt.Fprintf(out, " %d. %v\n", i, key)
+            }
+            return out.String()
+        }
+        have := getKeys(state.journal.dirties)
+        want := getKeys(checkstate.journal.dirties)
+        return fmt.Errorf("dirty-journal set mismatch.\nhave:\n%v\nwant:\n%v\n", have, want)
+    }
     return nil
 }

@@ -671,18 +826,19 @@ func TestCopyCopyCommitCopy(t *testing.T) {
     }
 }

-// TestCommitCopy tests the copy from a committed state is not functional.
+// TestCommitCopy tests that the copy of a committed state is not fully functional.
 func TestCommitCopy(t *testing.T) {
-    state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+    db := NewDatabase(rawdb.NewMemoryDatabase())
+    state, _ := New(types.EmptyRootHash, db, nil)

     // Create an account and check if the retrieved balance is correct
     addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
-    skey := common.HexToHash("aaa")
-    sval := common.HexToHash("bbb")
+    skey1, skey2 := common.HexToHash("a1"), common.HexToHash("a2")
+    sval1, sval2 := common.HexToHash("b1"), common.HexToHash("b2")

     state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
     state.SetCode(addr, []byte("hello"))                                         // Change an external metadata
-    state.SetState(addr, skey, sval)                                             // Change the storage trie
+    state.SetState(addr, skey1, sval1)                                           // Change the storage trie

     if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
         t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
@@ -690,25 +846,38 @@ func TestCommitCopy(t *testing.T) {
     if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
         t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
     }
-    if val := state.GetState(addr, skey); val != sval {
-        t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval)
+    if val := state.GetState(addr, skey1); val != sval1 {
+        t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval1)
     }
-    if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) {
+    if val := state.GetCommittedState(addr, skey1); val != (common.Hash{}) {
         t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
     }
-    // Copy the committed state database, the copied one is not functional.
-    state.Commit(0, true)
+    root, _ := state.Commit(0, true)
+
+    state, _ = New(root, db, nil)
+    state.SetState(addr, skey2, sval2)
+    state.Commit(1, true)
+
+    // Copy the committed state database, the copied one is not fully functional.
     copied := state.Copy()
-    if balance := copied.GetBalance(addr); balance.Cmp(uint256.NewInt(0)) != 0 {
+    if balance := copied.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
         t.Fatalf("unexpected balance: have %v", balance)
     }
-    if code := copied.GetCode(addr); code != nil {
+    if code := copied.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
         t.Fatalf("unexpected code: have %x", code)
     }
-    if val := copied.GetState(addr, skey); val != (common.Hash{}) {
+    // Miss slots because of non-functional trie after commit
+    if val := copied.GetState(addr, skey1); val != (common.Hash{}) {
+        t.Fatalf("unexpected storage slot: have %x", val)
+    }
+    if val := copied.GetCommittedState(addr, skey1); val != (common.Hash{}) {
         t.Fatalf("unexpected storage slot: have %x", val)
     }
-    if val := copied.GetCommittedState(addr, skey); val != (common.Hash{}) {
+    // Slots cached in the stateDB, available after commit
+    if val := copied.GetState(addr, skey2); val != sval2 {
+        t.Fatalf("unexpected storage slot: have %x", val)
+    }
+    if val := copied.GetCommittedState(addr, skey2); val != sval2 {
         t.Fatalf("unexpected storage slot: have %x", val)
     }
     if !errors.Is(copied.Error(), trie.ErrCommitted) {
@@ -1065,40 +1234,6 @@ func TestStateDBTransientStorage(t *testing.T) {
     }
 }

-func TestResetObject(t *testing.T) {
-    var (
-        disk     = rawdb.NewMemoryDatabase()
-        tdb      = triedb.NewDatabase(disk, nil)
-        db       = NewDatabaseWithNodeDB(disk, tdb)
-        snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash)
-        state, _ = New(types.EmptyRootHash, db, snaps)
-        addr     = common.HexToAddress("0x1")
-        slotA    = common.HexToHash("0x1")
-        slotB    = common.HexToHash("0x2")
-    )
-    // Initialize account with balance and storage in first transaction.
-    state.SetBalance(addr, uint256.NewInt(1), tracing.BalanceChangeUnspecified)
-    state.SetState(addr, slotA, common.BytesToHash([]byte{0x1}))
-    state.IntermediateRoot(true)
-
-    // Reset account and mutate balance and storages
-    state.CreateAccount(addr)
-    state.SetBalance(addr, uint256.NewInt(2), tracing.BalanceChangeUnspecified)
-    state.SetState(addr, slotB, common.BytesToHash([]byte{0x2}))
-    root, _ := state.Commit(0, true)
-
-    // Ensure the original account is wiped properly
-    snap := snaps.Snapshot(root)
-    slot, _ := snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotA.Bytes()))
-    if len(slot) != 0 {
-        t.Fatalf("Unexpected storage slot")
-    }
-    slot, _ = snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotB.Bytes()))
-    if !bytes.Equal(slot, []byte{0x2}) {
-        t.Fatalf("Unexpected storage slot value %v", slot)
-    }
-}
-
 func TestDeleteStorage(t *testing.T) {
     var (
         disk = rawdb.NewMemoryDatabase()
diff --git a/core/state/transient_storage.go b/core/state/transient_storage.go
index 66e563efa7..e63db39eba 100644
--- a/core/state/transient_storage.go
+++ b/core/state/transient_storage.go
@@ -17,6 +17,10 @@ package state

 import (
+    "fmt"
+    "slices"
+    "strings"
+
     "github.com/ethereum/go-ethereum/common"
 )
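The Set change in the next hunk makes zero-value writes prune the inner map, and the address entry once it empties, instead of storing zero hashes. That is what lets the maps.Equal-based comparison in checkEqual treat "never written" and "written then zeroed" transient state identically. A usage sketch (would have to live inside package state, since these identifiers are unexported):

func exampleTransientDelete() {
    ts := newTransientStorage()
    addr := common.BytesToAddress([]byte("addr"))
    key := common.HexToHash("0x01")

    ts.Set(addr, key, common.HexToHash("0x02")) // allocates ts[addr] and stores the value
    ts.Set(addr, key, common.Hash{})            // zero value: deletes the key, then the empty addr map
    fmt.Println(len(ts))                        // 0, indistinguishable from a fresh transientStorage
}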
@@ -30,10 +34,19 @@ func newTransientStorage() transientStorage {

 // Set sets the transient-storage `value` for `key` at the given `addr`.
 func (t transientStorage) Set(addr common.Address, key, value common.Hash) {
-    if _, ok := t[addr]; !ok {
-        t[addr] = make(Storage)
+    if value == (common.Hash{}) { // this is a 'delete'
+        if _, ok := t[addr]; ok {
+            delete(t[addr], key)
+            if len(t[addr]) == 0 {
+                delete(t, addr)
+            }
+        }
+    } else {
+        if _, ok := t[addr]; !ok {
+            t[addr] = make(Storage)
+        }
+        t[addr][key] = value
     }
-    t[addr][key] = value
 }

 // Get gets the transient storage for `key` at the given `addr`.
@@ -53,3 +66,27 @@ func (t transientStorage) Copy() transientStorage {
     }
     return storage
 }
+
+// PrettyPrint prints the contents of the transient storage in a human-readable form
+func (t transientStorage) PrettyPrint() string {
+    out := new(strings.Builder)
+    var sortedAddrs []common.Address
+    for addr := range t {
+        sortedAddrs = append(sortedAddrs, addr)
+    }
+    slices.SortFunc(sortedAddrs, common.Address.Cmp)
+
+    for _, addr := range sortedAddrs {
+        fmt.Fprintf(out, "%#x:", addr)
+        var sortedKeys []common.Hash
+        storage := t[addr]
+        for key := range storage {
+            sortedKeys = append(sortedKeys, key)
+        }
+        slices.SortFunc(sortedKeys, common.Hash.Cmp)
+        for _, key := range sortedKeys {
+            fmt.Fprintf(out, " %X : %X\n", key, storage[key])
+        }
+    }
+    return out.String()
+}
diff --git a/core/state_processor.go b/core/state_processor.go
index 40c4181335..ae5d57812b 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -197,6 +197,13 @@ func ApplyTransactionWithResultFilter(config *params.ChainConfig, bc ChainContex
 // ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root
 // contract. This method is exported to be used in tests.
 func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *state.StateDB) {
+    if vmenv.Config.Tracer != nil && vmenv.Config.Tracer.OnSystemCallStart != nil {
+        vmenv.Config.Tracer.OnSystemCallStart()
+    }
+    if vmenv.Config.Tracer != nil && vmenv.Config.Tracer.OnSystemCallEnd != nil {
+        defer vmenv.Config.Tracer.OnSystemCallEnd()
+    }
+
     // If EIP-4788 is enabled, we need to invoke the beaconroot storage contract with
     // the new root
     msg := &Message{
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
index ee86b96c9a..c5642eeffa 100644
--- a/core/state_processor_test.go
+++ b/core/state_processor_test.go
@@ -418,10 +418,11 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
         header.ParentBeaconRoot = &beaconRoot
     }
     // Assemble and return the final block for sealing
+    body := &types.Body{Transactions: txs}
     if config.IsShanghai(header.Number, header.Time, types.DeserializeHeaderExtraInformation(header).ArbOSFormatVersion) {
-        return types.NewBlockWithWithdrawals(header, txs, nil, receipts, []*types.Withdrawal{}, trie.NewStackTrie(nil))
+        body.Withdrawals = []*types.Withdrawal{}
     }
-    return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil))
+    return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil))
 }

 var (
diff --git a/core/state_transition.go b/core/state_transition.go
index 20602a825a..2856d05681 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -523,8 +523,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
     } else {
         fee := new(uint256.Int).SetUint64(st.gasUsed())
         fee.Mul(fee, effectiveTipU256)
-        st.state.AddBalance(tipReceipient, fee, tracing.BalanceIncreaseRewardTransactionFee)
-        tipAmount = fee.ToBig()
+        st.state.AddBalance(st.evm.Context.Coinbase, fee, tracing.BalanceIncreaseRewardTransactionFee)
     }

     // Arbitrum: record the tip
diff --git a/core/tracing/CHANGELOG.md b/core/tracing/CHANGELOG.md
index 77eda4ad76..93b91cf479 100644
--- a/core/tracing/CHANGELOG.md
+++ b/core/tracing/CHANGELOG.md
@@ -4,6 +4,15 @@ All notable changes to the tracing interface will be documented in this file.

 ## [Unreleased]

+There have been minor backwards-compatible changes to the tracing interface to explicitly mark the execution of **system** contracts. As of now the only system call updates the parent beacon block root as per [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788). Other system calls are being considered for future hardforks.
+
+### New methods
+
+- `OnSystemCallStart()`: This hook is called when the EVM starts processing a system call. Note that system calls happen outside the scope of a transaction. This event will be followed by normal EVM execution events.
+- `OnSystemCallEnd()`: This hook is called when the EVM finishes processing a system call.
+
+## [v1.14.0]
+
 There has been a major breaking change in the tracing interface for custom native tracers. JS and built-in tracers are not affected by this change and tracing API methods may be used as before. This overhaul has been done as part of the new live tracing feature ([#29189](https://github.com/ethereum/go-ethereum/pull/29189)). To learn more about live tracing please refer to the [docs](https://geth.ethereum.org/docs/developers/evm-tracing/live-tracing).

 **The `EVMLogger` interface which the tracers implemented has been removed.** It has been replaced by a new struct `tracing.Hooks`. `Hooks` keeps pointers to event listening functions. Internally the EVM will use these function pointers to emit events and can skip an event if the tracer has opted not to implement it. In fact this is the main reason for this change of approach. Another benefit is the ease of adding new hooks in future, and dynamically assigning event receivers.
@@ -66,4 +75,5 @@ The hooks `CaptureStart` and `CaptureEnd` have been removed. These hooks signale

 - `CaptureState` -> `OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error)`. `op` is of type `byte` which can be cast to `vm.OpCode` when necessary. A `*vm.ScopeContext` is not passed anymore. It is replaced by `tracing.OpContext` which offers access to the memory, stack and current contract.
 - `CaptureFault` -> `OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error)`. Similar to above.

-[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.13.14...master
\ No newline at end of file
+[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.14.0...master
+[v1.14.0]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0
\ No newline at end of file
diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go
index 8235f08f85..f95de98b94 100644
--- a/core/tracing/hooks.go
+++ b/core/tracing/hooks.go
@@ -84,6 +84,10 @@ type (
     TxEndHook = func(receipt *types.Receipt, err error)

     // EnterHook is invoked when the processing of a message starts.
+    //
+    // Take note that EnterHook, when in the context of a live tracer, can be invoked
+    // outside of the `OnTxStart` and `OnTxEnd` hooks when dealing with system calls;
+    // see [OnSystemCallStartHook] and [OnSystemCallEndHook] for more information.
     EnterHook = func(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int)

     // ExitHook is invoked when the processing of a message ends.
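How a live tracer might consume the new hooks; a hypothetical tracer shown only to illustrate the wiring added in this diff (OnStorageChange is one of the pre-existing state hooks):

// Sketch: suppress per-transaction attribution for system-call state changes.
// Not safe for concurrent use; illustration only.
func newSystemCallAwareHooks() *tracing.Hooks {
    var inSystemCall bool
    return &tracing.Hooks{
        OnSystemCallStart: func() { inSystemCall = true },
        OnSystemCallEnd:   func() { inSystemCall = false },
        OnStorageChange: func(addr common.Address, slot, prev, new common.Hash) {
            if inSystemCall {
                return // e.g. the EIP-4788 beacon-root write, outside any transaction
            }
            // ...regular per-transaction accounting...
        },
    }
}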
@@ -92,6 +96,10 @@ type (
     // ran out of gas when attempting to persist the code to database did not
     // count as a call failure and did not cause a revert of the call. This will
     // be indicated by `reverted == false` and `err == ErrCodeStoreOutOfGas`.
+    //
+    // Take note that ExitHook, when in the context of a live tracer, can be invoked
+    // outside of the `OnTxStart` and `OnTxEnd` hooks when dealing with system calls;
+    // see [OnSystemCallStartHook] and [OnSystemCallEndHook] for more information.
     ExitHook = func(depth int, output []byte, gasUsed uint64, err error, reverted bool)

     // OpcodeHook is invoked just prior to the execution of an opcode.
@@ -128,6 +136,22 @@ type (
     // GenesisBlockHook is called when the genesis block is being processed.
     GenesisBlockHook = func(genesis *types.Block, alloc types.GenesisAlloc)

+    // OnSystemCallStartHook is called when a system call is about to be executed. Today,
+    // this hook is invoked when the EIP-4788 system call is about to be executed to set the
+    // beacon block root.
+    //
+    // After this hook, the EVM call tracing will happen as usual, so you will receive
+    // `OnEnter`/`OnExit` events as well as state hooks between this hook and the
+    // `OnSystemCallEndHook`.
+    //
+    // Note that system calls happen outside normal transaction execution, so the
+    // `OnTxStart`/`OnTxEnd` hooks will not be invoked.
+    OnSystemCallStartHook = func()
+
+    // OnSystemCallEndHook is called when a system call has finished executing. Today,
+    // this hook is invoked after the EIP-4788 system call has executed to set the
+    // beacon block root.
+    OnSystemCallEndHook = func()
+
     /*
         - State events -
     */
@@ -164,12 +188,14 @@ type Hooks struct {
     OnFault     FaultHook
     OnGasChange GasChangeHook
     // Chain events
-    OnBlockchainInit BlockchainInitHook
-    OnClose          CloseHook
-    OnBlockStart     BlockStartHook
-    OnBlockEnd       BlockEndHook
-    OnSkippedBlock   SkippedBlockHook
-    OnGenesisBlock   GenesisBlockHook
+    OnBlockchainInit  BlockchainInitHook
+    OnClose           CloseHook
+    OnBlockStart      BlockStartHook
+    OnBlockEnd        BlockEndHook
+    OnSkippedBlock    SkippedBlockHook
+    OnGenesisBlock    GenesisBlockHook
+    OnSystemCallStart OnSystemCallStartHook
+    OnSystemCallEnd   OnSystemCallEndHook
     // State events
     OnBalanceChange BalanceChangeHook
     OnNonceChange   NonceChangeHook
diff --git a/core/txindexer_test.go b/core/txindexer_test.go
index 7b5ff1f206..0a606ed8fa 100644
--- a/core/txindexer_test.go
+++ b/core/txindexer_test.go
@@ -18,7 +18,6 @@ package core

 import (
     "math/big"
-    "os"
     "testing"

     "github.com/ethereum/go-ethereum/common"
@@ -211,8 +210,7 @@ func TestTxIndexer(t *testing.T) {
         },
     }
     for _, c := range cases {
-        frdir := t.TempDir()
-        db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
+        db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
         rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))

         // Index the initial blocks from ancient store
@@ -238,6 +236,5 @@ func TestTxIndexer(t *testing.T) {
         verify(db, 0, indexer)

         db.Close()
-        os.RemoveAll(frdir)
     }
 }
diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go
index 68d7b6f411..c86991c942 100644
--- a/core/txpool/legacypool/legacypool_test.go
+++ b/core/txpool/legacypool/legacypool_test.go
@@ -87,7 +87,7 @@ func (bc *testBlockChain) CurrentBlock() *types.Header {
 }

 func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
-    return types.NewBlock(bc.CurrentBlock(), nil, nil, nil, trie.NewStackTrie(nil))
nil, nil, trie.NewStackTrie(nil)) + return types.NewBlock(bc.CurrentBlock(), nil, nil, trie.NewStackTrie(nil)) } func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { diff --git a/core/types/block.go b/core/types/block.go index 53054f52d3..4857cd6e50 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -23,6 +23,7 @@ import ( "io" "math/big" "reflect" + "slices" "sync/atomic" "time" @@ -217,13 +218,19 @@ type extblock struct { // NewBlock creates a new block. The input data is copied, changes to header and to the // field values will not affect the block. // -// The values of TxHash, UncleHash, ReceiptHash and Bloom in header -// are ignored and set to values derived from the given txs, uncles -// and receipts. -func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher TrieHasher) *Block { - b := &Block{header: CopyHeader(header)} +// The body elements and the receipts are used to recompute and overwrite the +// relevant portions of the header. +func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher TrieHasher) *Block { + if body == nil { + body = &Body{} + } + var ( + b = NewBlockWithHeader(header) + txs = body.Transactions + uncles = body.Uncles + withdrawals = body.Withdrawals + ) - // TODO: panic if len(txs) != len(receipts) if len(txs) == 0 { b.header.TxHash = EmptyTxsHash } else { @@ -249,27 +256,18 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []* } } - return b -} - -// NewBlockWithWithdrawals creates a new block with withdrawals. The input data is copied, -// changes to header and to the field values will not affect the block. -// -// The values of TxHash, UncleHash, ReceiptHash and Bloom in header are ignored and set to -// values derived from the given txs, uncles and receipts. -func NewBlockWithWithdrawals(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, withdrawals []*Withdrawal, hasher TrieHasher) *Block { - b := NewBlock(header, txs, uncles, receipts, hasher) - if withdrawals == nil { b.header.WithdrawalsHash = nil } else if len(withdrawals) == 0 { b.header.WithdrawalsHash = &EmptyWithdrawalsHash + b.withdrawals = Withdrawals{} } else { - h := DeriveSha(Withdrawals(withdrawals), hasher) - b.header.WithdrawalsHash = &h + hash := DeriveSha(Withdrawals(withdrawals), hasher) + b.header.WithdrawalsHash = &hash + b.withdrawals = slices.Clone(withdrawals) } - return b.WithWithdrawals(withdrawals) + return b } // CopyHeader creates a deep copy of a block header. @@ -453,31 +451,17 @@ func (b *Block) WithSeal(header *Header) *Block { } } -// WithBody returns a copy of the block with the given transaction and uncle contents. -func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block { - block := &Block{ - header: b.header, - transactions: make([]*Transaction, len(transactions)), - uncles: make([]*Header, len(uncles)), - withdrawals: b.withdrawals, - } - copy(block.transactions, transactions) - for i := range uncles { - block.uncles[i] = CopyHeader(uncles[i]) - } - return block -} - -// WithWithdrawals returns a copy of the block containing the given withdrawals. -func (b *Block) WithWithdrawals(withdrawals []*Withdrawal) *Block { +// WithBody returns a new block with the original header and a deep copy of the +// provided body. 
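(Editorial example.) The `NewBlock` rework above collapses the old positional arguments and the separate `NewBlockWithWithdrawals`/`WithWithdrawals` helpers into a single `Body` value; the new `WithBody`, whose implementation follows, takes the same `Body` struct by value. A hedged migration sketch for a typical call site:

```go
package blockdemo

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

// buildBlock replaces the pre-change pattern
//   types.NewBlock(header, txs, uncles, receipts, hasher)  (+ WithWithdrawals)
// with the single-Body form; nil fields are simply left out.
func buildBlock(header *types.Header, txs []*types.Transaction, uncles []*types.Header,
	receipts []*types.Receipt, withdrawals []*types.Withdrawal) *types.Block {
	return types.NewBlock(header, &types.Body{
		Transactions: txs,
		Uncles:       uncles,
		Withdrawals:  withdrawals,
	}, receipts, trie.NewStackTrie(nil))
}

// rebuild shows the matching WithBody form used throughout this diff.
func rebuild(header *types.Header, body types.Body) *types.Block {
	return types.NewBlockWithHeader(header).WithBody(body)
}
```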
+func (b *Block) WithBody(body Body) *Block { block := &Block{ header: b.header, - transactions: b.transactions, - uncles: b.uncles, + transactions: slices.Clone(body.Transactions), + uncles: make([]*Header, len(body.Uncles)), + withdrawals: slices.Clone(body.Withdrawals), } - if withdrawals != nil { - block.withdrawals = make([]*Withdrawal, len(withdrawals)) - copy(block.withdrawals, withdrawals) + for i := range body.Uncles { + block.uncles[i] = CopyHeader(body.Uncles[i]) } return block } diff --git a/core/types/block_test.go b/core/types/block_test.go index 982d002242..1af5b9d7bf 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -254,7 +254,7 @@ func makeBenchBlock() *Block { Extra: []byte("benchmark uncle"), } } - return NewBlock(header, txs, uncles, receipts, blocktest.NewHasher()) + return NewBlock(header, &Body{Transactions: txs, Uncles: uncles}, receipts, blocktest.NewHasher()) } func TestRlpDecodeParentHash(t *testing.T) { diff --git a/core/vm/contracts.go b/core/vm/contracts.go index c1264b3e24..d1e5dd7a33 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -112,12 +112,6 @@ var PrecompiledContractsCancun = map[common.Address]PrecompiledContract{ common.BytesToAddress([]byte{0xa}): &kzgPointEvaluation{}, } -// PrecompiledContractsP256Verify contains the precompiled Ethereum -// contract specified in EIP-7212. -var PrecompiledContractsP256Verify = map[common.Address]PrecompiledContract{ - common.BytesToAddress([]byte{0x01, 0x00}): &p256Verify{}, -} - // PrecompiledContractsPrague contains the set of pre-compiled Ethereum // contracts used in the Prague release. var PrecompiledContractsPrague = map[common.Address]PrecompiledContract{ @@ -730,6 +724,8 @@ func (c *bls12381G1Add) Run(input []byte) ([]byte, error) { return nil, err } + // No need to check the subgroup here, as specified by EIP-2537 + // Compute r = p_0 + p_1 p0.Add(p0, p1) @@ -759,6 +755,11 @@ func (c *bls12381G1Mul) Run(input []byte) ([]byte, error) { if p0, err = decodePointG1(input[:128]); err != nil { return nil, err } + // 'point is on curve' check already done, + // Here we need to apply subgroup checks. + if !p0.IsInSubGroup() { + return nil, errBLS12381G1PointSubgroup + } // Decode scalar value e := new(big.Int).SetBytes(input[128:]) @@ -812,6 +813,11 @@ func (c *bls12381G1MultiExp) Run(input []byte) ([]byte, error) { if err != nil { return nil, err } + // 'point is on curve' check already done, + // Here we need to apply subgroup checks. + if !p.IsInSubGroup() { + return nil, errBLS12381G1PointSubgroup + } points[i] = *p // Decode scalar value scalars[i] = *new(fr.Element).SetBytes(input[t1:t2]) @@ -852,6 +858,8 @@ func (c *bls12381G2Add) Run(input []byte) ([]byte, error) { return nil, err } + // No need to check the subgroup here, as specified by EIP-2537 + // Compute r = p_0 + p_1 r := new(bls12381.G2Affine) r.Add(p0, p1) @@ -882,6 +890,11 @@ func (c *bls12381G2Mul) Run(input []byte) ([]byte, error) { if p0, err = decodePointG2(input[:256]); err != nil { return nil, err } + // 'point is on curve' check already done, + // Here we need to apply subgroup checks. + if !p0.IsInSubGroup() { + return nil, errBLS12381G2PointSubgroup + } // Decode scalar value e := new(big.Int).SetBytes(input[256:]) @@ -935,6 +948,11 @@ func (c *bls12381G2MultiExp) Run(input []byte) ([]byte, error) { if err != nil { return nil, err } + // 'point is on curve' check already done, + // Here we need to apply subgroup checks. 
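(Editorial example.) The checks being added to the BLS12-381 scalar operations, continued for `bls12381G2MultiExp` right after this aside, all follow one pattern: decoding already rejects points that are off the curve, and EIP-2537 additionally requires subgroup membership for MUL/multiexp inputs while exempting plain additions. A hedged restatement using gnark-crypto, the library these precompiles already build on:

```go
package blsdemo

import (
	"errors"

	bls12381 "github.com/consensys/gnark-crypto/ecc/bls12-381"
)

var errG1PointSubgroup = errors.New("g1 point is not on correct subgroup")

// requireG1Subgroup is the guard inserted before scalar multiplication:
// the point is known to be on the curve, so only membership in the
// r-torsion subgroup still needs to be proven.
func requireG1Subgroup(p *bls12381.G1Affine) error {
	if !p.IsInSubGroup() {
		return errG1PointSubgroup
	}
	return nil
}
```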
+ if !p.IsInSubGroup() { + return nil, errBLS12381G2PointSubgroup + } points[i] = *p // Decode scalar value scalars[i] = *new(fr.Element).SetBytes(input[t1:t2]) @@ -1124,9 +1142,6 @@ func (c *bls12381MapG1) Run(input []byte) ([]byte, error) { // Compute mapping r := bls12381.MapToG1(fe) - if err != nil { - return nil, err - } // Encode the G1 point to 128 bytes return encodePointG1(&r), nil @@ -1160,9 +1175,6 @@ func (c *bls12381MapG2) Run(input []byte) ([]byte, error) { // Compute mapping r := bls12381.MapToG2(bls12381.E2{A0: c0, A1: c1}) - if err != nil { - return nil, err - } // Encode the G2 point to 256 bytes return encodePointG2(&r), nil diff --git a/core/vm/evm.go b/core/vm/evm.go index 57917a9a32..8e8dda5a23 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -458,14 +458,15 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, return nil, common.Address{}, gas, ErrNonceUintOverflow } evm.StateDB.SetNonce(caller.Address(), nonce+1) - // We add this to the access list _before_ taking a snapshot. Even if the creation fails, - // the access-list change should not be rolled back + + // We add this to the access list _before_ taking a snapshot. Even if the + // creation fails, the access-list change should not be rolled back. if evm.chainRules.IsBerlin { evm.StateDB.AddAddressToAccessList(address) } // Ensure there's no existing contract already at the designated address. // Account is regarded as existent if any of these three conditions is met: - // - the nonce is nonzero + // - the nonce is non-zero // - the code is non-empty // - the storage is non-empty contractHash := evm.StateDB.GetCodeHash(address) @@ -478,9 +479,19 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } return nil, common.Address{}, 0, ErrContractAddressCollision } - // Create a new account on the state + // Create a new account on the state only if the object was not present. + // It is possible that the contract code is deployed to a pre-existing + // account with a non-zero balance. snapshot := evm.StateDB.Snapshot() - evm.StateDB.CreateAccount(address) + if !evm.StateDB.Exist(address) { + evm.StateDB.CreateAccount(address) + } + // CreateContract means that regardless of whether the account previously existed + // in the state trie or not, it _now_ becomes created as a _contract_ account. + // This is performed _prior_ to executing the initcode, since the initcode + // acts inside that account.
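(Editorial example.) The diff resumes below with exactly the step this comment describes (`evm.StateDB.CreateContract(address)`), so here is the whole new create-path ordering restated as a standalone sketch (ours, not geth's code), against the `vm.StateDB` interface extended later in this diff:

```go
package createdemo

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/vm"
)

// prepareCreateTarget preserves a pre-existing account's balance by only
// calling CreateAccount for fresh addresses, then unconditionally marks
// the address as a newly created contract before the initcode runs.
func prepareCreateTarget(db vm.StateDB, addr common.Address) {
	if !db.Exist(addr) {
		db.CreateAccount(addr)
	}
	db.CreateContract(addr)
}
```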
+ evm.StateDB.CreateContract(address) + if evm.chainRules.IsEIP158 { evm.StateDB.SetNonce(address, 1) } diff --git a/core/vm/instructions.go b/core/vm/instructions.go index c921f768f6..a63426f217 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -175,11 +175,7 @@ func opByte(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt func opAddmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek() - if z.IsZero() { - z.Clear() - } else { - z.AddMod(&x, &y, z) - } + z.AddMod(&x, &y, z) return nil, nil } diff --git a/core/vm/interface.go b/core/vm/interface.go index 132d8d4ec1..0818f75b32 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -48,6 +48,7 @@ type StateDB interface { Database() state.Database CreateAccount(common.Address) + CreateContract(common.Address) SubBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason) AddBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason) diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index 34ad2d3d4f..39021fb6b8 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -779,7 +779,7 @@ func setBlockhash(data *engine.ExecutableData) *engine.ExecutableData { Extra: data.ExtraData, MixDigest: data.Random, } - block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */) + block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs}) data.BlockHash = block.Hash() return data } @@ -935,7 +935,7 @@ func TestNewPayloadOnInvalidTerminalBlock(t *testing.T) { Extra: data.ExtraData, MixDigest: data.Random, } - block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */) + block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs}) data.BlockHash = block.Hash() // Send the new payload resp2, err := api.NewPayloadV1(data) @@ -1554,7 +1554,7 @@ func TestBlockToPayloadWithBlobs(t *testing.T) { }, } - block := types.NewBlock(&header, txs, nil, nil, trie.NewStackTrie(nil)) + block := types.NewBlock(&header, &types.Body{Transactions: txs}, nil, trie.NewStackTrie(nil)) envelope := engine.BlockToExecutableData(block, nil, sidecars) var want int for _, tx := range txs { diff --git a/eth/downloader/beaconsync.go b/eth/downloader/beaconsync.go index 7dfc419f4e..8088f16af9 100644 --- a/eth/downloader/beaconsync.go +++ b/eth/downloader/beaconsync.go @@ -106,7 +106,7 @@ func (b *beaconBackfiller) resume() { }() // If the downloader fails, report an error as in beacon chain mode there // should be no errors as long as the chain we're syncing to is valid. - if err := b.downloader.synchronise("", common.Hash{}, nil, nil, mode, true, b.started); err != nil { + if err := b.downloader.synchronise(mode, b.started); err != nil { log.Error("Beacon backfilling failed", "err", err) return } @@ -268,9 +268,9 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) { return start, nil } -// fetchBeaconHeaders feeds skeleton headers to the downloader queue for scheduling +// fetchHeaders feeds skeleton headers to the downloader queue for scheduling // until sync errors or is finished. 
-func (d *Downloader) fetchBeaconHeaders(from uint64) error { +func (d *Downloader) fetchHeaders(from uint64) error { var head *types.Header _, tail, _, err := d.skeleton.Bounds() if err != nil { diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 941f575aa8..bb083260e4 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -41,17 +41,14 @@ import ( var ( MaxBlockFetch = 128 // Number of blocks to be fetched per retrieval request MaxHeaderFetch = 192 // Number of block headers to be fetched per retrieval request - MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly MaxReceiptFetch = 256 // Number of transaction receipts to allow fetching per request - maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) - maxHeadersProcess = 2048 // Number of header download results to import at once into the chain - maxResultsProcess = 2048 // Number of content download results to import at once into the chain - fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) - lightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) + maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) + maxHeadersProcess = 2048 // Number of header download results to import at once into the chain + maxResultsProcess = 2048 // Number of content download results to import at once into the chain + fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) - reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection - reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs + reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download @@ -59,24 +56,16 @@ var ( ) var ( - errBusy = errors.New("busy") - errUnknownPeer = errors.New("peer is unknown or unhealthy") - errBadPeer = errors.New("action from bad peer ignored") - errStallingPeer = errors.New("peer is stalling") - errUnsyncedPeer = errors.New("unsynced peer") - errNoPeers = errors.New("no peers to keep download active") + errBusy = errors.New("busy") + errBadPeer = errors.New("action from bad peer ignored") + errTimeout = errors.New("timeout") - errEmptyHeaderSet = errors.New("empty header set by peer") - errPeersUnavailable = errors.New("no peers available or all tried for download") - errInvalidAncestor = errors.New("retrieved ancestor is invalid") errInvalidChain = errors.New("retrieved hash chain is invalid") errInvalidBody = errors.New("retrieved block body is invalid") errInvalidReceipt = errors.New("retrieved receipt is invalid") errCancelStateFetch = errors.New("state data download canceled (requested)") errCancelContentProcessing = errors.New("content processing canceled (requested)") errCanceled = errors.New("syncing canceled (requested)") - errTooOld = errors.New("peer's protocol version too old") - errNoAncestorFound = errors.New("no common ancestor found") errNoPivotHeader = errors.New("pivot header is not found") ErrMergeTransition = errors.New("legacy 
sync reached the merge") ) @@ -99,9 +88,8 @@ type Downloader struct { mode atomic.Uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode mux *event.TypeMux // Event multiplexer to announce sync operation events - genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT) - queue *queue // Scheduler for selecting the hashes to download - peers *peerSet // Set of active peers from which download can proceed + queue *queue // Scheduler for selecting the hashes to download + peers *peerSet // Set of active peers from which download can proceed stateDB ethdb.Database // Database to state sync into (and deduplicate via) @@ -118,11 +106,10 @@ type Downloader struct { badBlock badBlockFn // Reports a block as rejected by the chain // Status - synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing - synchronising atomic.Bool - notified atomic.Bool - committed atomic.Bool - ancientLimit uint64 // The maximum block number which can be regarded as ancient data. + synchronising atomic.Bool + notified atomic.Bool + committed atomic.Bool + ancientLimit uint64 // The maximum block number which can be regarded as ancient data. // Channels headerProcCh chan *headerTask // Channel to feed the header processor new tasks @@ -138,7 +125,6 @@ type Downloader struct { stateSyncStart chan *stateSync // Cancellation and termination - cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) cancelCh chan struct{} // Channel to cancel mid-flight syncs cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers cancelWg sync.WaitGroup // Make sure all fetcher goroutines have exited. @@ -147,7 +133,6 @@ type Downloader struct { quitLock sync.Mutex // Lock to prevent double closes // Testing hooks - syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) @@ -326,39 +311,10 @@ func (d *Downloader) UnregisterPeer(id string) error { return nil } -// LegacySync tries to sync up our local block chain with a remote peer, both -// adding various sanity checks as well as wrapping it with various log entries. -func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, mode SyncMode) error { - err := d.synchronise(id, head, td, ttd, mode, false, nil) - - switch err { - case nil, errBusy, errCanceled: - return err - } - if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) || - errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) || - errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) { - log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err) - if d.dropPeer == nil { - // The dropPeer method is nil when `--copydb` is used for a local copy. - // Timeouts can occur if e.g. 
compaction hits at the wrong time, and can be ignored - log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id) - } else { - d.dropPeer(id) - } - return err - } - if errors.Is(err, ErrMergeTransition) { - return err // This is an expected fault, don't keep printing it in a spin-loop - } - log.Warn("Synchronisation failed, retrying", "err", err) - return err -} - // synchronise will select the peer and use it for synchronising. If an empty string is given // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the // checks fail an error will be returned. This method is synchronous -func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, mode SyncMode, beaconMode bool, beaconPing chan struct{}) error { +func (d *Downloader) synchronise(mode SyncMode, beaconPing chan struct{}) error { // The beacon header syncer is async. It will start this synchronization and // will continue doing other tasks. However, if synchronization needs to be // cancelled, the syncer needs to know if we reached the startup point (and @@ -373,10 +329,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, } }() } - // Mock out the synchronisation if testing - if d.synchroniseMock != nil { - return d.synchroniseMock(id, hash) - } // Make sure only one goroutine is ever allowed past this point at once if !d.synchronising.CompareAndSwap(false, true) { return errBusy @@ -424,7 +376,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, // Create cancel channel for aborting mid-flight and mark the master peer d.cancelLock.Lock() d.cancelCh = make(chan struct{}) - d.cancelPeer = id d.cancelLock.Unlock() defer d.Cancel() // No matter what, we can't leave the cancel channel open @@ -432,27 +383,19 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, // Atomically set the requested sync mode d.mode.Store(uint32(mode)) - // Retrieve the origin peer and initiate the downloading process - var p *peerConnection - if !beaconMode { // Beacon mode doesn't need a peer to sync from - p = d.peers.Peer(id) - if p == nil { - return errUnknownPeer - } - } if beaconPing != nil { close(beaconPing) } - return d.syncWithPeer(p, hash, td, ttd, beaconMode) + return d.syncToHead() } func (d *Downloader) getMode() SyncMode { return SyncMode(d.mode.Load()) } -// syncWithPeer starts a block synchronization based on the hash chain from the -// specified peer and head hash. -func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *big.Int, beaconMode bool) (err error) { +// syncToHead starts a block synchronization based on the hash chain from +// the specified head hash. 
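(Editorial aside, before the body of `syncToHead` below.) With the peer bookkeeping gone, `synchronise` above reduces to a mode switch guarded against concurrent cycles. The guard it relies on is the standard compare-and-swap single-flight idiom, sketched here in isolation (type and field names are ours; geth keeps the flag as `d.synchronising`):

```go
package syncdemo

import (
	"errors"
	"sync/atomic"
)

var errBusy = errors.New("busy")

type guard struct {
	running atomic.Bool
}

// runExclusive admits exactly one sync cycle at a time; concurrent
// callers get errBusy instead of blocking.
func (g *guard) runExclusive(f func() error) error {
	if !g.running.CompareAndSwap(false, true) {
		return errBusy
	}
	defer g.running.Store(false)
	return f()
}
```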
+func (d *Downloader) syncToHead() (err error) { d.mux.Post(StartEvent{}) defer func() { // reset on error @@ -465,52 +408,39 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * }() mode := d.getMode() - if !beaconMode { - log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode) - } else { - log.Debug("Backfilling with the network", "mode", mode) - } + log.Debug("Backfilling with the network", "mode", mode) defer func(start time.Time) { log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start))) }(time.Now()) // Look up the sync boundaries: the common ancestor and the target block var latest, pivot, final *types.Header - if !beaconMode { - // In legacy mode, use the master peer to retrieve the headers from - latest, pivot, err = d.fetchHead(p) - if err != nil { - return err - } - } else { - // In beacon mode, use the skeleton chain to retrieve the headers from - latest, _, final, err = d.skeleton.Bounds() - if err != nil { - return err - } - if latest.Number.Uint64() > uint64(fsMinFullBlocks) { - number := latest.Number.Uint64() - uint64(fsMinFullBlocks) - - // Retrieve the pivot header from the skeleton chain segment but - // fallback to local chain if it's not found in skeleton space. - if pivot = d.skeleton.Header(number); pivot == nil { - _, oldest, _, _ := d.skeleton.Bounds() // error is already checked - if number < oldest.Number.Uint64() { - count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks - headers := d.readHeaderRange(oldest, count) - if len(headers) == count { - pivot = headers[len(headers)-1] - log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number) - } + latest, _, final, err = d.skeleton.Bounds() + if err != nil { + return err + } + if latest.Number.Uint64() > uint64(fsMinFullBlocks) { + number := latest.Number.Uint64() - uint64(fsMinFullBlocks) + + // Retrieve the pivot header from the skeleton chain segment but + // fallback to local chain if it's not found in skeleton space. + if pivot = d.skeleton.Header(number); pivot == nil { + _, oldest, _, _ := d.skeleton.Bounds() // error is already checked + if number < oldest.Number.Uint64() { + count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks + headers := d.readHeaderRange(oldest, count) + if len(headers) == count { + pivot = headers[len(headers)-1] + log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number) } } - // Print an error log and return directly in case the pivot header - // is still not found. It means the skeleton chain is not linked - // correctly with local chain. - if pivot == nil { - log.Error("Pivot header is not found", "number", number) - return errNoPivotHeader - } + } + // Print an error log and return directly in case the pivot header + // is still not found. It means the skeleton chain is not linked + // correctly with local chain. 
+ if pivot == nil { + log.Error("Pivot header is not found", "number", number) + return errNoPivotHeader } } // If no pivot block was returned, the head is below the min full block @@ -522,19 +452,10 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * } height := latest.Number.Uint64() - var origin uint64 - if !beaconMode { - // In legacy mode, reach out to the network and find the ancestor - origin, err = d.findAncestor(p, latest) - if err != nil { - return err - } - } else { - // In beacon mode, use the skeleton chain for the ancestor lookup - origin, err = d.findBeaconAncestor() - if err != nil { - return err - } + // In beacon mode, use the skeleton chain for the ancestor lookup + origin, err := d.findBeaconAncestor() + if err != nil { + return err } d.syncStatsLock.Lock() if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { @@ -577,24 +498,15 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * // the ancientLimit through that. Otherwise calculate the ancient limit through // the advertised height of the remote peer. This is mostly a fallback for // legacy networks, but should eventually be dropped. TODO(karalabe). - if beaconMode { - // Beacon sync, use the latest finalized block as the ancient limit - // or a reasonable height if no finalized block is yet announced. - if final != nil { - d.ancientLimit = final.Number.Uint64() - } else if height > fullMaxForkAncestry+1 { - d.ancientLimit = height - fullMaxForkAncestry - 1 - } else { - d.ancientLimit = 0 - } + // + // Beacon sync, use the latest finalized block as the ancient limit + // or a reasonable height if no finalized block is yet announced. + if final != nil { + d.ancientLimit = final.Number.Uint64() + } else if height > fullMaxForkAncestry+1 { + d.ancientLimit = height - fullMaxForkAncestry - 1 } else { - // Legacy sync, use the best announcement we have from the remote peer. - // TODO(karalabe): Drop this pathway. - if height > fullMaxForkAncestry+1 { - d.ancientLimit = height - fullMaxForkAncestry - 1 - } else { - d.ancientLimit = 0 - } + d.ancientLimit = 0 } frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here.
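(Editorial example.) The beacon-mode ancient-limit rule that survives above is small enough to restate as a pure function, which also makes the precedence explicit: finalized block first, then head minus the immutability threshold, else zero.

```go
package ancientdemo

import "github.com/ethereum/go-ethereum/core/types"

// ancientLimit mirrors the retained branch above: data at or below the
// returned number may be moved to the ancient store (freezer).
func ancientLimit(final *types.Header, height, maxForkAncestry uint64) uint64 {
	if final != nil {
		return final.Number.Uint64()
	}
	if height > maxForkAncestry+1 {
		return height - maxForkAncestry - 1
	}
	return 0
}
```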
@@ -616,22 +528,13 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * } // Initiate the sync using a concurrent header and content retrieval algorithm d.queue.Prepare(origin+1, mode) - if d.syncInitHook != nil { - d.syncInitHook(origin, height) - } - var headerFetcher func() error - if !beaconMode { - // In legacy mode, headers are retrieved from the network - headerFetcher = func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) } - } else { - // In beacon mode, headers are served by the skeleton syncer - headerFetcher = func() error { return d.fetchBeaconHeaders(origin + 1) } - } + + // In beacon mode, headers are served by the skeleton syncer fetchers := []func() error{ - headerFetcher, // Headers are always retrieved - func() error { return d.fetchBodies(origin+1, beaconMode) }, // Bodies are retrieved during normal and snap sync - func() error { return d.fetchReceipts(origin+1, beaconMode) }, // Receipts are retrieved during snap sync - func() error { return d.processHeaders(origin+1, td, ttd, beaconMode) }, + func() error { return d.fetchHeaders(origin + 1) }, // Headers are always retrieved + func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and snap sync + func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during snap sync + func() error { return d.processHeaders(origin + 1) }, } if mode == SnapSync { d.pivotLock.Lock() @@ -640,7 +543,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * fetchers = append(fetchers, func() error { return d.processSnapSyncContent() }) } else if mode == FullSync { - fetchers = append(fetchers, func() error { return d.processFullSyncContent(ttd, beaconMode) }) + fetchers = append(fetchers, func() error { return d.processFullSyncContent() }) } return d.spawnSync(fetchers) } @@ -719,540 +622,12 @@ func (d *Downloader) Terminate() { d.Cancel() } -// fetchHead retrieves the head header and prior pivot block (if available) from -// a remote peer. -func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *types.Header, err error) { - p.log.Debug("Retrieving remote chain head") - mode := d.getMode() - - // Request the advertised remote head block and wait for the response - latest, _ := p.peer.Head() - fetch := 1 - if mode == SnapSync { - fetch = 2 // head + pivot headers - } - headers, hashes, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true) - if err != nil { - return nil, nil, err - } - // Make sure the peer gave us at least one and at most the requested headers - if len(headers) == 0 || len(headers) > fetch { - return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch) - } - // The first header needs to be the head, validate against the request. If - // only 1 header was returned, make sure there's no pivot or there was not - // one requested. - head = headers[0] - if len(headers) == 1 { - if mode == SnapSync && head.Number.Uint64() > uint64(fsMinFullBlocks) { - return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer) - } - p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", hashes[0]) - return head, nil, nil - } - // At this point we have 2 headers in total and the first is the - // validated head of the chain. 
Check the pivot number and return, - pivot = headers[1] - if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) { - return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks)) - } - return head, pivot, nil -} - -// calculateRequestSpan calculates what headers to request from a peer when trying to determine the -// common ancestor. -// It returns parameters to be used for peer.RequestHeadersByNumber: -// -// from - starting block number -// count - number of headers to request -// skip - number of headers to skip -// -// and also returns 'max', the last block which is expected to be returned by the remote peers, -// given the (from,count,skip) -func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) { - var ( - from int - count int - MaxCount = MaxHeaderFetch / 16 - ) - // requestHead is the highest block that we will ask for. If requestHead is not offset, - // the highest block that we will get is 16 blocks back from head, which means we - // will fetch 14 or 15 blocks unnecessarily in the case the height difference - // between us and the peer is 1-2 blocks, which is most common - requestHead := int(remoteHeight) - 1 - if requestHead < 0 { - requestHead = 0 - } - // requestBottom is the lowest block we want included in the query - // Ideally, we want to include the one just below our own head - requestBottom := int(localHeight - 1) - if requestBottom < 0 { - requestBottom = 0 - } - totalSpan := requestHead - requestBottom - span := 1 + totalSpan/MaxCount - if span < 2 { - span = 2 - } - if span > 16 { - span = 16 - } - - count = 1 + totalSpan/span - if count > MaxCount { - count = MaxCount - } - if count < 2 { - count = 2 - } - from = requestHead - (count-1)*span - if from < 0 { - from = 0 - } - max := from + (count-1)*span - return int64(from), count, span - 1, uint64(max) -} - -// findAncestor tries to locate the common ancestor link of the local chain and -// a remote peers blockchain. In the general case when our node was in sync and -// on the correct chain, checking the top N links should already get us a match. -// In the rare scenario when we ended up on a long reorganisation (i.e. none of -// the head links match), we do a binary search to find the common ancestor. -func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) { - // Figure out the valid ancestor range to prevent rewrite attacks - var ( - floor = int64(-1) - localHeight uint64 - remoteHeight = remoteHeader.Number.Uint64() - ) - mode := d.getMode() - switch mode { - case FullSync: - localHeight = d.blockchain.CurrentBlock().Number.Uint64() - case SnapSync: - localHeight = d.blockchain.CurrentSnapBlock().Number.Uint64() - default: - localHeight = d.lightchain.CurrentHeader().Number.Uint64() - } - p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight) - - // Recap floor value for binary search - maxForkAncestry := fullMaxForkAncestry - if d.getMode() == LightSync { - maxForkAncestry = lightMaxForkAncestry - } - if localHeight >= maxForkAncestry { - // We're above the max reorg threshold, find the earliest fork point - floor = int64(localHeight - maxForkAncestry) - } - // If we're doing a light sync, ensure the floor doesn't go below the CHT, as - // all headers before that point will be missing. 
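(Editorial aside.) The contract of the removed `calculateRequestSpan` above is easiest to see with concrete numbers; the following worked example applies the deleted formula by hand (with `MaxHeaderFetch = 192`, so `MaxCount = 12`):

```go
// remoteHeight = 1500, localHeight = 1000
// requestHead   = 1499, requestBottom = 999, totalSpan = 500
// span  = 1 + 500/12 = 42 -> clamped to 16
// count = 1 + 500/16 = 32 -> clamped to 12
// from  = 1499 - 11*16 = 1323
// max   = 1323 + 11*16 = 1499
//
// The peer would be asked for 12 headers starting at #1323 with 15
// headers skipped between samples, the last sample landing on #1499.
```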
- if mode == LightSync { - // If we don't know the current CHT position, find it - if d.genesis == 0 { - header := d.lightchain.CurrentHeader() - for header != nil { - d.genesis = header.Number.Uint64() - if floor >= int64(d.genesis)-1 { - break - } - header = d.lightchain.GetHeaderByHash(header.ParentHash) - } - } - // We already know the "genesis" block number, cap floor to that - if floor < int64(d.genesis)-1 { - floor = int64(d.genesis) - 1 - } - } - - ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor) - if err == nil { - return ancestor, nil - } - // The returned error was not nil. - // If the error returned does not reflect that a common ancestor was not found, return it. - // If the error reflects that a common ancestor was not found, continue to binary search, - // where the error value will be reassigned. - if !errors.Is(err, errNoAncestorFound) { - return 0, err - } - - ancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor) - if err != nil { - return 0, err - } - return ancestor, nil -} - -func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (uint64, error) { - from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight) - - p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip) - headers, hashes, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false) - if err != nil { - return 0, err - } - // Wait for the remote response to the head fetch - number, hash := uint64(0), common.Hash{} - - // Make sure the peer actually gave something valid - if len(headers) == 0 { - p.log.Warn("Empty head header set") - return 0, errEmptyHeaderSet - } - // Make sure the peer's reply conforms to the request - for i, header := range headers { - expectNumber := from + int64(i)*int64(skip+1) - if number := header.Number.Int64(); number != expectNumber { - p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number) - return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering")) - } - } - // Check if a common ancestor was found - for i := len(headers) - 1; i >= 0; i-- { - // Skip any headers that underflow/overflow our requested set - if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max { - continue - } - // Otherwise check if we already know the header or not - h := hashes[i] - n := headers[i].Number.Uint64() - - var known bool - switch mode { - case FullSync: - known = d.blockchain.HasBlock(h, n) - case SnapSync: - known = d.blockchain.HasFastBlock(h, n) - default: - known = d.lightchain.HasHeader(h, n) - } - if known { - number, hash = n, h - break - } - } - // If the head fetch already found an ancestor, return - if hash != (common.Hash{}) { - if int64(number) <= floor { - p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor) - return 0, errInvalidAncestor - } - p.log.Debug("Found common ancestor", "number", number, "hash", hash) - return number, nil - } - return 0, errNoAncestorFound -} - -func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (uint64, error) { - hash := common.Hash{} - - // Ancestor not found, we need to binary search over our chain - start, end := uint64(0), remoteHeight - if floor > 0 { - start = uint64(floor) - } - p.log.Trace("Binary searching for common ancestor", "start", start, "end", end) - - for 
start+1 < end { - // Split our chain interval in two, and request the hash to cross check - check := (start + end) / 2 - - headers, hashes, err := d.fetchHeadersByNumber(p, check, 1, 0, false) - if err != nil { - return 0, err - } - // Make sure the peer actually gave something valid - if len(headers) != 1 { - p.log.Warn("Multiple headers for single request", "headers", len(headers)) - return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers)) - } - // Modify the search interval based on the response - h := hashes[0] - n := headers[0].Number.Uint64() - - var known bool - switch mode { - case FullSync: - known = d.blockchain.HasBlock(h, n) - case SnapSync: - known = d.blockchain.HasFastBlock(h, n) - default: - known = d.lightchain.HasHeader(h, n) - } - if !known { - end = check - continue - } - header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists - if header.Number.Uint64() != check { - p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) - return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number) - } - start = check - hash = h - } - // Ensure valid ancestry and return - if int64(start) <= floor { - p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor) - return 0, errInvalidAncestor - } - p.log.Debug("Found common ancestor", "number", start, "hash", hash) - return start, nil -} - -// fetchHeaders keeps retrieving headers concurrently from the number -// requested, until no more are returned, potentially throttling on the way. To -// facilitate concurrency but still protect against malicious nodes sending bad -// headers, we construct a header chain skeleton using the "origin" peer we are -// syncing with, and fill in the missing headers using anyone else. Headers from -// other peers are only accepted if they map cleanly to the skeleton. If no one -// can fill in the skeleton - not even the origin peer - it's assumed invalid and -// the origin is dropped. 
-func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) error { - p.log.Debug("Directing header downloads", "origin", from) - defer p.log.Debug("Header download terminated") - - // Start pulling the header chain skeleton until all is done - var ( - skeleton = true // Skeleton assembly phase or finishing up - pivoting = false // Whether the next request is pivot verification - ancestor = from - mode = d.getMode() - ) - for { - // Pull the next batch of headers, it either: - // - Pivot check to see if the chain moved too far - // - Skeleton retrieval to permit concurrent header fetches - // - Full header retrieval if we're near the chain head - var ( - headers []*types.Header - hashes []common.Hash - err error - ) - switch { - case pivoting: - d.pivotLock.RLock() - pivot := d.pivotHeader.Number.Uint64() - d.pivotLock.RUnlock() - - p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks)) - headers, hashes, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep - - case skeleton: - p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) - headers, hashes, err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) - - default: - p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from) - headers, hashes, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false) - } - switch err { - case nil: - // Headers retrieved, continue with processing - - case errCanceled: - // Sync cancelled, no issue, propagate up - return err - - default: - // Header retrieval either timed out, or the peer failed in some strange way - // (e.g. disconnect). Consider the master peer bad and drop - d.dropPeer(p.id) - - // Finish the sync gracefully instead of dumping the gathered data though - for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { - select { - case ch <- false: - case <-d.cancelCh: - } - } - select { - case d.headerProcCh <- nil: - case <-d.cancelCh: - } - return fmt.Errorf("%w: header request failed: %v", errBadPeer, err) - } - // If the pivot is being checked, move if it became stale and run the real retrieval - var pivot uint64 - - d.pivotLock.RLock() - if d.pivotHeader != nil { - pivot = d.pivotHeader.Number.Uint64() - } - d.pivotLock.RUnlock() - - if pivoting { - if len(headers) == 2 { - if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want { - log.Warn("Peer sent invalid next pivot", "have", have, "want", want) - return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want) - } - if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want { - log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want) - return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want) - } - log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number) - pivot = headers[0].Number.Uint64() - - d.pivotLock.Lock() - d.pivotHeader = headers[0] - d.pivotLock.Unlock() - - // Write out the pivot into the database so a rollback beyond - // it will reenable snap sync and update the state root that - // the state syncer will be downloading. 
- rawdb.WriteLastPivotNumber(d.stateDB, pivot) - } - // Disable the pivot check and fetch the next batch of headers - pivoting = false - continue - } - // If the skeleton's finished, pull any remaining head headers directly from the origin - if skeleton && len(headers) == 0 { - // A malicious node might withhold advertised headers indefinitely - if from+uint64(MaxHeaderFetch)-1 <= head { - p.log.Warn("Peer withheld skeleton headers", "advertised", head, "withheld", from+uint64(MaxHeaderFetch)-1) - return fmt.Errorf("%w: withheld skeleton headers: advertised %d, withheld #%d", errStallingPeer, head, from+uint64(MaxHeaderFetch)-1) - } - p.log.Debug("No skeleton, fetching headers directly") - skeleton = false - continue - } - // If no more headers are inbound, notify the content fetchers and return - if len(headers) == 0 { - // Don't abort header fetches while the pivot is downloading - if !d.committed.Load() && pivot <= from { - p.log.Debug("No headers, waiting for pivot commit") - select { - case <-time.After(fsHeaderContCheck): - continue - case <-d.cancelCh: - return errCanceled - } - } - // Pivot done (or not in snap sync) and no more headers, terminate the process - p.log.Debug("No more headers available") - select { - case d.headerProcCh <- nil: - return nil - case <-d.cancelCh: - return errCanceled - } - } - // If we received a skeleton batch, resolve internals concurrently - var progressed bool - if skeleton { - filled, hashset, proced, err := d.fillHeaderSkeleton(from, headers) - if err != nil { - p.log.Debug("Skeleton chain invalid", "err", err) - return fmt.Errorf("%w: %v", errInvalidChain, err) - } - headers = filled[proced:] - hashes = hashset[proced:] - - progressed = proced > 0 - from += uint64(proced) - } else { - // A malicious node might withhold advertised headers indefinitely - if n := len(headers); n < MaxHeaderFetch && headers[n-1].Number.Uint64() < head { - p.log.Warn("Peer withheld headers", "advertised", head, "delivered", headers[n-1].Number.Uint64()) - return fmt.Errorf("%w: withheld headers: advertised %d, delivered %d", errStallingPeer, head, headers[n-1].Number.Uint64()) - } - // If we're closing in on the chain head, but haven't yet reached it, delay - // the last few headers so mini reorgs on the head don't cause invalid hash - // chain errors. - if n := len(headers); n > 0 { - // Retrieve the current head we're at - var head uint64 - if mode == LightSync { - head = d.lightchain.CurrentHeader().Number.Uint64() - } else { - head = d.blockchain.CurrentSnapBlock().Number.Uint64() - if full := d.blockchain.CurrentBlock().Number.Uint64(); head < full { - head = full - } - } - // If the head is below the common ancestor, we're actually deduplicating - // already existing chain segments, so use the ancestor as the fake head. - // Otherwise, we might end up delaying header deliveries pointlessly. - if head < ancestor { - head = ancestor - } - // If the head is way older than this batch, delay the last few headers - if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() { - delay := reorgProtHeaderDelay - if delay > n { - delay = n - } - headers = headers[:n-delay] - hashes = hashes[:n-delay] - } - } - } - // If no headers have been delivered, or all of them have been delayed, - // sleep a bit and retry. 
Take care with headers already consumed during - // skeleton filling - if len(headers) == 0 && !progressed { - p.log.Trace("All headers delayed, waiting") - select { - case <-time.After(fsHeaderContCheck): - continue - case <-d.cancelCh: - return errCanceled - } - } - // Insert any remaining new headers and fetch the next batch - if len(headers) > 0 { - p.log.Trace("Scheduling new headers", "count", len(headers), "from", from) - select { - case d.headerProcCh <- &headerTask{ - headers: headers, - hashes: hashes, - }: - case <-d.cancelCh: - return errCanceled - } - from += uint64(len(headers)) - } - // If we're still skeleton filling snap sync, check pivot staleness - // before continuing to the next skeleton filling - if skeleton && pivot > 0 { - pivoting = true - } - } -} - -// fillHeaderSkeleton concurrently retrieves headers from all our available peers -// and maps them to the provided skeleton header chain. -// -// Any partial results from the beginning of the skeleton is (if possible) forwarded -// immediately to the header processor to keep the rest of the pipeline full even -// in the case of header stalls. -// -// The method returns the entire filled skeleton and also the number of headers -// already forwarded for processing. -func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, []common.Hash, int, error) { - log.Debug("Filling up skeleton", "from", from) - d.queue.ScheduleSkeleton(from, skeleton) - - err := d.concurrentFetch((*headerQueue)(d), false) - if err != nil { - log.Debug("Skeleton fill failed", "err", err) - } - filled, hashes, proced := d.queue.RetrieveHeaders() - if err == nil { - log.Debug("Skeleton fill succeeded", "filled", len(filled), "processed", proced) - } - return filled, hashes, proced, err -} - // fetchBodies iteratively downloads the scheduled block bodies, taking any // available peers, reserving a chunk of blocks for each, waiting for delivery // and also periodically checking for timeouts. -func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error { +func (d *Downloader) fetchBodies(from uint64) error { log.Debug("Downloading block bodies", "origin", from) - err := d.concurrentFetch((*bodyQueue)(d), beaconMode) + err := d.concurrentFetch((*bodyQueue)(d)) log.Debug("Block body download terminated", "err", err) return err @@ -1261,9 +636,9 @@ func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error { // fetchReceipts iteratively downloads the scheduled block receipts, taking any // available peers, reserving a chunk of receipts for each, waiting for delivery // and also periodically checking for timeouts. -func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error { +func (d *Downloader) fetchReceipts(from uint64) error { log.Debug("Downloading receipts", "origin", from) - err := d.concurrentFetch((*receiptQueue)(d), beaconMode) + err := d.concurrentFetch((*receiptQueue)(d)) log.Debug("Receipt download terminated", "err", err) return err @@ -1272,11 +647,10 @@ func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error { // processHeaders takes batches of retrieved headers from an input channel and // keeps processing and scheduling them into the header chain and downloader's // queue until the stream ends or a failure occurs. 
-func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error { +func (d *Downloader) processHeaders(origin uint64) error { var ( - mode = d.getMode() - gotHeaders = false // Wait for batches of headers to process - timer = time.NewTimer(time.Second) + mode = d.getMode() + timer = time.NewTimer(time.Second) ) defer timer.Stop() @@ -1295,48 +669,11 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode case <-d.cancelCh: } } - // If we're in legacy sync mode, we need to check total difficulty - // violations from malicious peers. That is not needed in beacon - // mode and we can skip to terminating sync. - if !beaconMode { - // If no headers were retrieved at all, the peer violated its TD promise that it had a - // better chain compared to ours. The only exception is if its promised blocks were - // already imported by other means (e.g. fetcher): - // - // R , L : Both at block 10 - // R: Mine block 11, and propagate it to L - // L: Queue block 11 for import - // L: Notice that R's head and TD increased compared to ours, start sync - // L: Import of block 11 finishes - // L: Sync begins, and finds common ancestor at 11 - // L: Request new headers up from 11 (R's TD was higher, it must have something) - // R: Nothing to give - if mode != LightSync { - head := d.blockchain.CurrentBlock() - if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { - return errStallingPeer - } - } - // If snap or light syncing, ensure promised headers are indeed delivered. This is - // needed to detect scenarios where an attacker feeds a bad pivot and then bails out - // of delivering the post-pivot blocks that would flag the invalid content. - // - // This check cannot be executed "as is" for full imports, since blocks may still be - // queued for processing when the header download completes. However, as long as the - // peer gave us something useful, we're already happy/progressed (above check). - if mode == SnapSync || mode == LightSync { - head := d.lightchain.CurrentHeader() - if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { - return errStallingPeer - } - } - } return nil } // Otherwise split the chunk of headers into batches and process them headers, hashes := task.headers, task.hashes - gotHeaders = true for len(headers) > 0 { // Terminate if something failed in between processing chunks select { @@ -1357,44 +694,12 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode // Although the received headers might be all valid, a legacy // PoW/PoA sync must not accept post-merge headers. Make sure // that any transition is rejected at this point. 
- var ( - rejected []*types.Header - td *big.Int - ) - if !beaconMode && ttd != nil { - td = d.blockchain.GetTd(chunkHeaders[0].ParentHash, chunkHeaders[0].Number.Uint64()-1) - if td == nil { - // This should never really happen, but handle gracefully for now - log.Error("Failed to retrieve parent header TD", "number", chunkHeaders[0].Number.Uint64()-1, "hash", chunkHeaders[0].ParentHash) - return fmt.Errorf("%w: parent TD missing", errInvalidChain) - } - for i, header := range chunkHeaders { - td = new(big.Int).Add(td, header.Difficulty) - if td.Cmp(ttd) >= 0 { - // Terminal total difficulty reached, allow the last header in - if new(big.Int).Sub(td, header.Difficulty).Cmp(ttd) < 0 { - chunkHeaders, rejected = chunkHeaders[:i+1], chunkHeaders[i+1:] - if len(rejected) > 0 { - // Make a nicer user log as to the first TD truly rejected - td = new(big.Int).Add(td, rejected[0].Difficulty) - } - } else { - chunkHeaders, rejected = chunkHeaders[:i], chunkHeaders[i:] - } - break - } - } - } if len(chunkHeaders) > 0 { if n, err := d.lightchain.InsertHeaderChain(chunkHeaders); err != nil { log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err) return fmt.Errorf("%w: %v", errInvalidChain, err) } } - if len(rejected) != 0 { - log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd) - return ErrMergeTransition - } } // Unless we're doing light chains, schedule the headers for associated content retrieval if mode == FullSync || mode == SnapSync { @@ -1436,7 +741,7 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode } // processFullSyncContent takes fetch results from the queue and imports them into the chain. -func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error { +func (d *Downloader) processFullSyncContent() error { for { results := d.queue.Results(true) if len(results) == 0 { @@ -1445,44 +750,9 @@ func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error if d.chainInsertHook != nil { d.chainInsertHook(results) } - // Although the received blocks might be all valid, a legacy PoW/PoA sync - // must not accept post-merge blocks. Make sure that pre-merge blocks are - // imported, but post-merge ones are rejected. 
- var ( - rejected []*fetchResult - td *big.Int - ) - if !beaconMode && ttd != nil { - td = d.blockchain.GetTd(results[0].Header.ParentHash, results[0].Header.Number.Uint64()-1) - if td == nil { - // This should never really happen, but handle gracefully for now - log.Error("Failed to retrieve parent block TD", "number", results[0].Header.Number.Uint64()-1, "hash", results[0].Header.ParentHash) - return fmt.Errorf("%w: parent TD missing", errInvalidChain) - } - for i, result := range results { - td = new(big.Int).Add(td, result.Header.Difficulty) - if td.Cmp(ttd) >= 0 { - // Terminal total difficulty reached, allow the last block in - if new(big.Int).Sub(td, result.Header.Difficulty).Cmp(ttd) < 0 { - results, rejected = results[:i+1], results[i+1:] - if len(rejected) > 0 { - // Make a nicer user log as to the first TD truly rejected - td = new(big.Int).Add(td, rejected[0].Header.Difficulty) - } - } else { - results, rejected = results[:i], results[i:] - } - break - } - } - } if err := d.importBlockResults(results); err != nil { return err } - if len(rejected) != 0 { - log.Info("Legacy sync reached merge threshold", "number", rejected[0].Header.Number, "hash", rejected[0].Header.Hash(), "td", td, "ttd", ttd) - return ErrMergeTransition - } } } @@ -1504,7 +774,7 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error { ) blocks := make([]*types.Block, len(results)) for i, result := range results { - blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals) + blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.body()) } // Downloaded blocks are always regarded as trusted after the // transition. Because the downloaded chain is guided by the @@ -1726,7 +996,7 @@ func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *state blocks := make([]*types.Block, len(results)) receipts := make([]types.Receipts, len(results)) for i, result := range results { - blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals) + blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.body()) receipts[i] = result.Receipts } if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil { @@ -1737,7 +1007,7 @@ func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *state } func (d *Downloader) commitPivotBlock(result *fetchResult) error { - block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals) + block := types.NewBlockWithHeader(result.Header).WithBody(result.body()) log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash()) // Commit the pivot block as the new head, will require full sync from here on diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 198a5a19d7..b19fec105e 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -19,8 +19,6 @@ package downloader import ( "fmt" "math/big" - "os" - "strings" "sync" "sync/atomic" "testing" @@ -44,7 +42,6 @@ import ( // downloadTester is a test simulator for mocking out local block chain. type downloadTester struct { - freezer string chain *core.BlockChain downloader *Downloader @@ -59,8 +56,7 @@ func newTester(t *testing.T) *downloadTester { // newTesterWithNotification creates a new downloader test mocker. 
func newTesterWithNotification(t *testing.T, success func()) *downloadTester { - freezer := t.TempDir() - db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false) + db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { panic(err) } @@ -77,9 +73,8 @@ func newTesterWithNotification(t *testing.T, success func()) *downloadTester { panic(err) } tester := &downloadTester{ - freezer: freezer, - chain: chain, - peers: make(map[string]*downloadTesterPeer), + chain: chain, + peers: make(map[string]*downloadTesterPeer), } tester.downloader = New(db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, success) return tester @@ -90,27 +85,6 @@ func newTesterWithNotification(t *testing.T, success func()) *downloadTester { func (dl *downloadTester) terminate() { dl.downloader.Terminate() dl.chain.Stop() - - os.RemoveAll(dl.freezer) -} - -// sync starts synchronizing with a remote peer, blocking until it completes. -func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { - head := dl.peers[id].chain.CurrentBlock() - if td == nil { - // If no particular TD was requested, load from the peer's blockchain - td = dl.peers[id].chain.GetTd(head.Hash(), head.Number.Uint64()) - } - // Synchronise with the chosen peer and ensure proper cleanup afterwards - err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil) - select { - case <-dl.downloader.cancelCh: - // Ok, downloader fully cancelled after sync cycle - default: - // Downloader is still accepting packets, can block a peer up - panic("downloader active post sync cycle") // panic will be caught by tester - } - return err } // newPeer registers a new block download source into the downloader. @@ -119,10 +93,10 @@ func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block defer dl.lock.Unlock() peer := &downloadTesterPeer{ - dl: dl, - id: id, - chain: newTestBlockchain(blocks), - withholdHeaders: make(map[common.Hash]struct{}), + dl: dl, + id: id, + chain: newTestBlockchain(blocks), + withholdBodies: make(map[common.Hash]struct{}), } dl.peers[id] = peer @@ -146,11 +120,10 @@ func (dl *downloadTester) dropPeer(id string) { } type downloadTesterPeer struct { - dl *downloadTester - id string - chain *core.BlockChain - - withholdHeaders map[common.Hash]struct{} + dl *downloadTester + withholdBodies map[common.Hash]struct{} + id string + chain *core.BlockChain } // Head constructs a function to retrieve a peer's current head hash @@ -186,15 +159,6 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i Reverse: reverse, }, nil) headers := unmarshalRlpHeaders(rlpHeaders) - // If a malicious peer is simulated withholding headers, delete them - for hash := range dlp.withholdHeaders { - for i, header := range headers { - if header.Hash() == hash { - headers = append(headers[:i], headers[i+1:]...) - break - } - } - } hashes := make([]common.Hash, len(headers)) for i, header := range headers { hashes[i] = header.Hash() @@ -230,15 +194,6 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, Reverse: reverse, }, nil) headers := unmarshalRlpHeaders(rlpHeaders) - // If a malicious peer is simulated withholding headers, delete them - for hash := range dlp.withholdHeaders { - for i, header := range headers { - if header.Hash() == hash { - headers = append(headers[:i], headers[i+1:]...) 
- break - } - } - } hashes := make([]common.Hash, len(headers)) for i, header := range headers { hashes[i] = header.Hash() @@ -278,7 +233,13 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et ) hasher := trie.NewStackTrie(nil) for i, body := range bodies { - txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) + hash := types.DeriveSha(types.Transactions(body.Transactions), hasher) + if _, ok := dlp.withholdBodies[hash]; ok { + txsHashes = append(txsHashes[:i], txsHashes[i+1:]...) + uncleHashes = append(uncleHashes[:i], uncleHashes[i+1:]...) + continue + } + txsHashes[i] = hash uncleHashes[i] = types.CalcUncleHash(body.Uncles) } req := ð.Request{ @@ -442,7 +403,10 @@ func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ET func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) } func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) + success := make(chan struct{}) + tester := newTesterWithNotification(t, func() { + close(success) + }) defer tester.terminate() // Create a small enough block chain to download @@ -450,10 +414,15 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { tester.newPeer("peer", protocol, chain.blocks[1:]) // Synchronise with the peer and make sure all relevant data was retrieved - if err := tester.sync("peer", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + t.Fatalf("failed to beacon-sync chain: %v", err) + } + select { + case <-success: + assertOwnChain(t, tester, len(chain.blocks)) + case <-time.NewTimer(time.Second * 3).C: + t.Fatalf("Failed to sync chain in three seconds") } - assertOwnChain(t, tester, len(chain.blocks)) } // Tests that if a large batch of blocks are being downloaded, it is throttled @@ -479,7 +448,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { // Start a synchronisation concurrently errc := make(chan error, 1) go func() { - errc <- tester.sync("peer", nil, mode) + errc <- tester.downloader.BeaconSync(mode, testChainBase.blocks[len(testChainBase.blocks)-1].Header(), nil) }() // Iteratively take some blocks, always checking the retrieval count for { @@ -535,132 +504,17 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { } } -// Tests that simple synchronization against a forked chain works correctly. In -// this test common ancestor lookup should *not* be short circuited, and a full -// binary search should be executed. 
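The converted tests above (`testCanonSync`, `testThrottling`) and those that follow all share one shape now that the legacy `tester.sync` helper is gone: start a beacon sync towards a known head, then wait on the completion callback with a timeout. The recurring pattern, distilled from this diff:

```go
// Distilled beacon-sync test pattern (sketch of the code in this diff).
success := make(chan struct{})
tester := newTesterWithNotification(t, func() { close(success) })
defer tester.terminate()

// Drive the downloader towards the head the "beacon" dictates.
head := chain.blocks[len(chain.blocks)-1].Header()
if err := tester.downloader.BeaconSync(mode, head, nil); err != nil {
	t.Fatalf("failed to start beacon sync: %v", err)
}
select {
case <-success:
	assertOwnChain(t, tester, len(chain.blocks))
case <-time.After(3 * time.Second):
	t.Fatal("sync did not complete in time")
}
```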
-func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) } -func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) } -func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) } - -func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80) - chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81) - tester.newPeer("fork A", protocol, chainA.blocks[1:]) - tester.newPeer("fork B", protocol, chainB.blocks[1:]) - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("fork A", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - // Synchronise with the second peer and make sure that fork is pulled too - if err := tester.sync("fork B", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainB.blocks)) -} - -// Tests that synchronising against a much shorter but much heavier fork works -// currently and is not dropped. -func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) } -func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) } -func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) } - -func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80) - chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79) - tester.newPeer("light", protocol, chainA.blocks[1:]) - tester.newPeer("heavy", protocol, chainB.blocks[1:]) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("light", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - // Synchronise with the second peer and make sure that fork is pulled too - if err := tester.sync("heavy", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainB.blocks)) -} - -// Tests that chain forks are contained within a certain interval of the current -// chain head, ensuring that malicious peers cannot waste resources by feeding -// long dead chains. 
-func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) } -func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) } -func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) } - -func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA - chainB := testChainForkLightB - tester.newPeer("original", protocol, chainA.blocks[1:]) - tester.newPeer("rewriter", protocol, chainB.blocks[1:]) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("original", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - // Synchronise with the second peer and ensure that the fork is rejected to being too old - if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor { - t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor) - } -} - -// Tests that chain forks are contained within a certain interval of the current -// chain head for short but heavy forks too. These are a bit special because they -// take different ancestor lookup paths. -func TestBoundedHeavyForkedSync68Full(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, FullSync) -} -func TestBoundedHeavyForkedSync68Snap(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync) -} -func TestBoundedHeavyForkedSync68Light(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, LightSync) -} - -func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create a long enough forked chain - chainA := testChainForkLightA - chainB := testChainForkHeavy - tester.newPeer("original", protocol, chainA.blocks[1:]) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("original", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:]) - // Synchronise with the second peer and ensure that the fork is rejected to being too old - if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor { - t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor) - } -} - // Tests that a canceled download wipes all previously accumulated state. 
func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) } func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) } func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) } func testCancel(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) + complete := make(chan struct{}) + success := func() { + close(complete) + } + tester := newTesterWithNotification(t, success) defer tester.terminate() chain := testChainBase.shorten(MaxHeaderFetch) @@ -672,38 +526,16 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) { t.Errorf("download queue not idle") } // Synchronise with the peer, but cancel afterwards - if err := tester.sync("peer", nil, mode); err != nil { + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } + <-complete tester.downloader.Cancel() if !tester.downloader.queue.Idle() { t.Errorf("download queue not idle") } } -// Tests that synchronisation from multiple peers works as intended (multi thread sanity test). -func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) } -func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) } -func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) } - -func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create various peers with various parts of the chain - targetPeers := 8 - chain := testChainBase.shorten(targetPeers * 100) - - for i := 0; i < targetPeers; i++ { - id := fmt.Sprintf("peer #%d", i) - tester.newPeer(id, protocol, chain.shorten(len(chain.blocks) / (i + 1)).blocks[1:]) - } - if err := tester.sync("peer #0", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - // Tests that synchronisations behave well in multi-version protocol environments // and not wreak havoc on other nodes in the network. 
func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) } @@ -711,7 +543,11 @@ func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) } func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) + complete := make(chan struct{}) + success := func() { + close(complete) + } + tester := newTesterWithNotification(t, success) defer tester.terminate() // Create a small enough block chain to download @@ -720,9 +556,14 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { // Create peers of every type tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:]) - // Synchronise with the requested peer and make sure all blocks were retrieved - if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + t.Fatalf("failed to start beacon sync: %v", err) + } + select { + case <-complete: + break + case <-time.NewTimer(time.Second * 3).C: + t.Fatalf("Failed to sync chain in three seconds") } assertOwnChain(t, tester, len(chain.blocks)) @@ -742,7 +583,10 @@ func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.E func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) } func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) + success := make(chan struct{}) + tester := newTesterWithNotification(t, func() { + close(success) + }) defer tester.terminate() // Create a block chain to download @@ -757,10 +601,19 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { tester.downloader.receiptFetchHook = func(headers []*types.Header) { receiptsHave.Add(int32(len(headers))) } - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("peer", nil, mode); err != nil { + + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } + select { + case <-success: + checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ + HighestBlock: uint64(len(chain.blocks) - 1), + CurrentBlock: uint64(len(chain.blocks) - 1), + }) + case <-time.NewTimer(time.Second * 3).C: + t.Fatalf("Failed to sync chain in three seconds") + } assertOwnChain(t, tester, len(chain.blocks)) // Validate the number of block bodies that should have been requested @@ -783,195 +636,6 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { } } -// Tests that headers are enqueued continuously, preventing malicious nodes from -// stalling the downloader by feeding gapped header chains.
-func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) } -func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) } -func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) } - -func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) - attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{} - - if err := tester.sync("attack", nil, mode); err == nil { - t.Fatalf("succeeded attacker synchronisation") - } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain.blocks[1:]) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - -// Tests that if requested headers are shifted (i.e. first is missing), the queue -// detects the invalid numbering. -func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) } -func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) } -func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) } - -func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Attempt a full sync with an attacker feeding shifted headers - attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) - attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{} - - if err := tester.sync("attack", nil, mode); err == nil { - t.Fatalf("succeeded attacker synchronisation") - } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain.blocks[1:]) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - -// Tests that a peer advertising a high TD doesn't get to stall the downloader -// afterwards by not sending any useful hashes. -func TestHighTDStarvationAttack68Full(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, FullSync) -} -func TestHighTDStarvationAttack68Snap(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, SnapSync) -} -func TestHighTDStarvationAttack68Light(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, LightSync) -} - -func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(1) - tester.newPeer("attack", protocol, chain.blocks[1:]) - if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer { - t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) - } -} - -// Tests that misbehaving peers are disconnected, whilst behaving ones are not. 
-func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) } - -func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { - // Define the disconnection requirement for individual hash fetch errors - tests := []struct { - result error - drop bool - }{ - {nil, false}, // Sync succeeded, all is well - {errBusy, false}, // Sync is already in progress, no problem - {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop - {errBadPeer, true}, // Peer was deemed bad for some reason, drop it - {errStallingPeer, true}, // Peer was detected to be stalling, drop it - {errUnsyncedPeer, true}, // Peer was detected to be unsynced, drop it - {errNoPeers, false}, // No peers to download from, soft race, no issue - {errTimeout, true}, // No hashes received in due time, drop the peer - {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end - {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser - {errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter - {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop - {errInvalidBody, false}, // A bad peer was detected, but not the sync origin - {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin - {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop - } - // Run the tests and check disconnection status - tester := newTester(t) - defer tester.terminate() - chain := testChainBase.shorten(1) - - for i, tt := range tests { - // Register a new peer and ensure its presence - id := fmt.Sprintf("test %d", i) - tester.newPeer(id, protocol, chain.blocks[1:]) - if _, ok := tester.peers[id]; !ok { - t.Fatalf("test %d: registered peer not found", i) - } - // Simulate a synchronisation and check the required result - tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result } - - tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync) - if _, ok := tester.peers[id]; !ok != tt.drop { - t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop) - } - } -} - -// Tests that synchronisation progress (origin block number, current block number -// and highest block number) is tracked and updated correctly. 
-func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } -func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } -func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) } - -func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Synchronise half the blocks and check initial progress - tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks) / 2).blocks[1:]) - pending := new(sync.WaitGroup) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("peer-half", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chain.blocks)/2 - 1), - }) - progress <- struct{}{} - pending.Wait() - - // Synchronise all the blocks and check continuation progress - tester.newPeer("peer-full", protocol, chain.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("peer-full", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ - StartingBlock: uint64(len(chain.blocks)/2 - 1), - CurrentBlock: uint64(len(chain.blocks)/2 - 1), - HighestBlock: uint64(len(chain.blocks) - 1), - }) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - StartingBlock: uint64(len(chain.blocks)/2 - 1), - CurrentBlock: uint64(len(chain.blocks) - 1), - HighestBlock: uint64(len(chain.blocks) - 1), - }) -} - func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) { // Mark this method as a helper to report errors at callsite, not in here t.Helper() @@ -982,296 +646,12 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync } } -// Tests that synchronisation progress (origin block number and highest block -// number) is tracked and updated correctly in case of a fork (or manual head -// revertal). 
-func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) } -func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) } -func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) } - -func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Synchronise with one of the forks and check progress - tester.newPeer("fork A", protocol, chainA.blocks[1:]) - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("fork A", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chainA.blocks) - 1), - }) - progress <- struct{}{} - pending.Wait() - - // Simulate a successful sync above the fork - tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight - - // Synchronise with the second fork and check progress resets - tester.newPeer("fork B", protocol, chainB.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("fork B", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{ - StartingBlock: uint64(len(testChainBase.blocks)) - 1, - CurrentBlock: uint64(len(chainA.blocks) - 1), - HighestBlock: uint64(len(chainB.blocks) - 1), - }) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - StartingBlock: uint64(len(testChainBase.blocks)) - 1, - CurrentBlock: uint64(len(chainB.blocks) - 1), - HighestBlock: uint64(len(chainB.blocks) - 1), - }) -} - -// Tests that if synchronisation is aborted due to some failure, then the progress -// origin is not updated in the next sync cycle, as it should be considered the -// continuation of the previous sync and not a new instance. 
-func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) } -func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) } -func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) } - -func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Attempt a full sync with a faulty peer - missing := len(chain.blocks)/2 - 1 - - faulter := tester.newPeer("faulty", protocol, chain.blocks[1:]) - faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} - - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("faulty", nil, mode); err == nil { - panic("succeeded faulty synchronisation") - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chain.blocks) - 1), - }) - progress <- struct{}{} - pending.Wait() - afterFailedSync := tester.downloader.Progress() - - // Synchronise with a good peer and check that the progress origin remind the same - // after a failure - tester.newPeer("valid", protocol, chain.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("valid", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", afterFailedSync) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(len(chain.blocks) - 1), - HighestBlock: uint64(len(chain.blocks) - 1), - }) -} - -// Tests that if an attacker fakes a chain height, after the attack is detected, -// the progress height is successfully reduced at the next sync invocation. -func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) } -func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) } -func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) } - -func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Create and sync with an attacker that promises a higher chain than available. 
- attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) - numMissing := 5 - for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- { - attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{} - } - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("attack", nil, mode); err == nil { - panic("succeeded attacker synchronisation") - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chain.blocks) - 1), - }) - progress <- struct{}{} - pending.Wait() - afterFailedSync := tester.downloader.Progress() - - // Synchronise with a good peer and check that the progress height has been reduced to - // the true value. - validChain := chain.shorten(len(chain.blocks) - numMissing) - tester.newPeer("valid", protocol, validChain.blocks[1:]) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("valid", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ - CurrentBlock: afterFailedSync.CurrentBlock, - HighestBlock: uint64(len(validChain.blocks) - 1), - }) - // Check final progress after successful sync. - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(len(validChain.blocks) - 1), - HighestBlock: uint64(len(validChain.blocks) - 1), - }) -} - -func TestRemoteHeaderRequestSpan(t *testing.T) { - testCases := []struct { - remoteHeight uint64 - localHeight uint64 - expected []int - }{ - // Remote is way higher. We should ask for the remote head and go backwards - {1500, 1000, - []int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499}, - }, - {15000, 13006, - []int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999}, - }, - // Remote is pretty close to us. We don't have to fetch as many - {1200, 1150, - []int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199}, - }, - // Remote is equal to us (so on a fork with higher td) - // We should get the closest couple of ancestors - {1500, 1500, - []int{1497, 1499}, - }, - // We're higher than the remote! 
Odd - {1000, 1500, - []int{997, 999}, - }, - // Check some weird edgecases that it behaves somewhat rationally - {0, 1500, - []int{0, 2}, - }, - {6000000, 0, - []int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999}, - }, - {0, 0, - []int{0, 2}, - }, - } - reqs := func(from, count, span int) []int { - var r []int - num := from - for len(r) < count { - r = append(r, num) - num += span + 1 - } - return r - } - for i, tt := range testCases { - from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight) - data := reqs(int(from), count, span) - - if max != uint64(data[len(data)-1]) { - t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max) - } - failed := false - if len(data) != len(tt.expected) { - failed = true - t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data)) - } else { - for j, n := range data { - if n != tt.expected[j] { - failed = true - break - } - } - } - if failed { - res := strings.ReplaceAll(fmt.Sprint(data), " ", ",") - exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",") - t.Logf("got: %v\n", res) - t.Logf("exp: %v\n", exp) - t.Errorf("test %d: wrong values", i) - } - } -} - // Tests that peers below a pre-configured checkpoint block are prevented from // being fast-synced from, avoiding potential cheap eclipse attacks. func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) } func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) } func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { - //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - var cases = []struct { name string // The name of testing scenario local int // The length of local chain(canonical chain assumed), 0 means genesis is the head @@ -1312,81 +692,67 @@ func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { } } -// Tests that synchronisation progress (origin block number and highest block -// number) is tracked and updated correctly in case of manual head reversion -func TestBeaconForkedSyncProgress68Full(t *testing.T) { - testBeaconForkedSyncProgress(t, eth.ETH68, FullSync) -} -func TestBeaconForkedSyncProgress68Snap(t *testing.T) { - testBeaconForkedSyncProgress(t, eth.ETH68, SnapSync) -} -func TestBeaconForkedSyncProgress68Light(t *testing.T) { - testBeaconForkedSyncProgress(t, eth.ETH68, LightSync) -} +// Tests that synchronisation progress (origin block number, current block number +// and highest block number) is tracked and updated correctly. 
+func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } +func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } +func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) } -func testBeaconForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { +func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { success := make(chan struct{}) tester := newTesterWithNotification(t, func() { success <- struct{}{} }) defer tester.terminate() + checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) + chain := testChainBase.shorten(blockCacheMaxItems - 15) + shortChain := chain.shorten(len(chain.blocks) / 2).blocks[1:] - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress + // Connect to peer that provides all headers and part of the bodies + faultyPeer := tester.newPeer("peer-half", protocol, shortChain) + for _, header := range shortChain { + faultyPeer.withholdBodies[header.Hash()] = struct{}{} } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Synchronise with one of the forks and check progress - tester.newPeer("fork A", protocol, chainA.blocks[1:]) - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.downloader.BeaconSync(mode, chainA.blocks[len(chainA.blocks)-1].Header(), nil); err != nil { - panic(fmt.Sprintf("failed to beacon sync: %v", err)) - } - }() - <-starting - progress <- struct{}{} + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)/2-1].Header(), nil); err != nil { + t.Fatalf("failed to beacon-sync chain: %v", err) + } select { case <-success: - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chainA.blocks) - 1), - CurrentBlock: uint64(len(chainA.blocks) - 1), + // Ok, downloader fully cancelled after sync cycle + checkProgress(t, tester.downloader, "peer-half", ethereum.SyncProgress{ + CurrentBlock: uint64(len(chain.blocks)/2 - 1), + HighestBlock: uint64(len(chain.blocks)/2 - 1), }) case <-time.NewTimer(time.Second * 3).C: t.Fatalf("Failed to sync chain in three seconds") } - // Set the head to a second fork - tester.newPeer("fork B", protocol, chainB.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.downloader.BeaconSync(mode, chainB.blocks[len(chainB.blocks)-1].Header(), nil); err != nil { - panic(fmt.Sprintf("failed to beacon sync: %v", err)) - } - }() - - <-starting - progress <- struct{}{} + // Synchronise all the blocks and check continuation progress + tester.newPeer("peer-full", protocol, chain.blocks[1:]) + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + t.Fatalf("failed to beacon-sync chain: %v", err) + } + var startingBlock uint64 + if mode == LightSync { + // in light-sync mode: + // * the starting block is 0 on the second sync cycle because blocks + // are never downloaded. + // * The current/highest blocks reported in the progress reflect the + // current/highest header. 
+ startingBlock = 0 + } else { + startingBlock = uint64(len(chain.blocks)/2 - 1) + } - // reorg below available state causes the state sync to rewind to genesis select { case <-success: - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chainB.blocks) - 1), - CurrentBlock: uint64(len(chainB.blocks) - 1), - StartingBlock: 0, + // Ok, downloader fully cancelled after sync cycle + checkProgress(t, tester.downloader, "peer-full", ethereum.SyncProgress{ + StartingBlock: startingBlock, + CurrentBlock: uint64(len(chain.blocks) - 1), + HighestBlock: uint64(len(chain.blocks) - 1), }) case <-time.NewTimer(time.Second * 3).C: t.Fatalf("Failed to sync chain in three seconds") diff --git a/eth/downloader/fetchers.go b/eth/downloader/fetchers.go index cc4279b0da..4ebb9bbc98 100644 --- a/eth/downloader/fetchers.go +++ b/eth/downloader/fetchers.go @@ -68,48 +68,3 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil } } - -// fetchHeadersByNumber is a blocking version of Peer.RequestHeadersByNumber which -// handles all the cancellation, interruption and timeout mechanisms of a data -// retrieval to allow blocking API calls. -func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) { - // Create the response sink and send the network request - start := time.Now() - resCh := make(chan *eth.Response) - - req, err := p.peer.RequestHeadersByNumber(number, amount, skip, reverse, resCh) - if err != nil { - return nil, nil, err - } - defer req.Close() - - // Wait until the response arrives, the request is cancelled or times out - ttl := d.peers.rates.TargetTimeout() - - timeoutTimer := time.NewTimer(ttl) - defer timeoutTimer.Stop() - - select { - case <-d.cancelCh: - return nil, nil, errCanceled - - case <-timeoutTimer.C: - // Header retrieval timed out, update the metrics - p.log.Debug("Header request timed out", "elapsed", ttl) - headerTimeoutMeter.Mark(1) - - return nil, nil, errTimeout - - case res := <-resCh: - // Headers successfully retrieved, update the metrics - headerReqTimer.Update(time.Since(start)) - headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest)))) - - // Don't reject the packet even if it turns out to be bad, downloader will - // disconnect the peer on its own terms. Simply delivery the headers to - // be processed by the caller - res.Done <- nil - - return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil - } -} diff --git a/eth/downloader/fetchers_concurrent.go b/eth/downloader/fetchers_concurrent.go index 649aa27615..9d8cd114c1 100644 --- a/eth/downloader/fetchers_concurrent.go +++ b/eth/downloader/fetchers_concurrent.go @@ -76,7 +76,7 @@ type typedQueue interface { // concurrentFetch iteratively downloads scheduled block parts, taking available // peers, reserving a chunk of fetch requests for each and waiting for delivery // or timeouts. 
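The `fetchHeadersByNumber` helper deleted above was the last user of the downloader's blocking-request idiom: issue the request, then race cancellation, a rate-tuned timeout and the response channel. As a standalone sketch (the generic helper and its error values are illustrative, not part of this change):

```go
import (
	"errors"
	"time"
)

// awaitResponse races delivery against cancellation and a timeout,
// mirroring the deleted blocking fetch helpers.
func awaitResponse[T any](resCh <-chan T, cancelCh <-chan struct{}, ttl time.Duration) (T, error) {
	var zero T
	timer := time.NewTimer(ttl)
	defer timer.Stop()
	select {
	case <-cancelCh:
		return zero, errors.New("canceled")
	case <-timer.C:
		return zero, errors.New("request timed out")
	case res := <-resCh:
		return res, nil
	}
}
```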
-func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { +func (d *Downloader) concurrentFetch(queue typedQueue) error { // Create a delivery channel to accept responses from all peers responses := make(chan *eth.Response) @@ -126,10 +126,6 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { // Prepare the queue and fetch block parts until the block header fetcher's done finished := false for { - // Short circuit if we lost all our peers - if d.peers.Len() == 0 && !beaconMode { - return errNoPeers - } // If there's nothing more to fetch, wait or terminate if queue.pending() == 0 { if len(pending) == 0 && finished { @@ -158,27 +154,20 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { } sort.Sort(&peerCapacitySort{idles, caps}) - var ( - progressed bool - throttled bool - queued = queue.pending() - ) + var throttled bool for _, peer := range idles { // Short circuit if throttling activated or there are no more // queued tasks to be retrieved if throttled { break } - if queued = queue.pending(); queued == 0 { + if queued := queue.pending(); queued == 0 { break } // Reserve a chunk of fetches for a peer. A nil can mean either that // no more headers are available, or that the peer is known not to // have them. - request, progress, throttle := queue.reserve(peer, queue.capacity(peer, d.peers.rates.TargetRoundTrip())) - if progress { - progressed = true - } + request, _, throttle := queue.reserve(peer, queue.capacity(peer, d.peers.rates.TargetRoundTrip())) if throttle { throttled = true throttleCounter.Inc(1) @@ -207,11 +196,6 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { timeout.Reset(ttl) } } - // Make sure that we have peers available for fetching. If all peers have been tried - // and all failed throw an error - if !progressed && !throttled && len(pending) == 0 && len(idles) == d.peers.Len() && queued > 0 && !beaconMode { - return errPeersUnavailable - } } // Wait for something to happen select { @@ -315,16 +299,6 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { queue.updateCapacity(peer, 0, 0) } else { d.dropPeer(peer.id) - - // If this peer was the master peer, abort sync immediately - d.cancelLock.RLock() - master := peer.id == d.cancelPeer - d.cancelLock.RUnlock() - - if master { - d.cancel() - return errTimeout - } } case res := <-responses: diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go index 5105fda66b..56359b33c9 100644 --- a/eth/downloader/fetchers_concurrent_bodies.go +++ b/eth/downloader/fetchers_concurrent_bodies.go @@ -78,7 +78,6 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan if q.bodyFetchHook != nil { q.bodyFetchHook(req.Headers) } - hashes := make([]common.Hash, 0, len(req.Headers)) for _, header := range req.Headers { hashes = append(hashes, header.Hash()) diff --git a/eth/downloader/fetchers_concurrent_headers.go b/eth/downloader/fetchers_concurrent_headers.go deleted file mode 100644 index 8201f4ca74..0000000000 --- a/eth/downloader/fetchers_concurrent_headers.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package downloader - -import ( - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/log" -) - -// headerQueue implements typedQueue and is a type adapter between the generic -// concurrent fetcher and the downloader. -type headerQueue Downloader - -// waker returns a notification channel that gets pinged in case more header -// fetches have been queued up, so the fetcher might assign it to idle peers. -func (q *headerQueue) waker() chan bool { - return q.queue.headerContCh -} - -// pending returns the number of headers that are currently queued for fetching -// by the concurrent downloader. -func (q *headerQueue) pending() int { - return q.queue.PendingHeaders() -} - -// capacity is responsible for calculating how many headers a particular peer is -// estimated to be able to retrieve within the allotted round trip time. -func (q *headerQueue) capacity(peer *peerConnection, rtt time.Duration) int { - return peer.HeaderCapacity(rtt) -} - -// updateCapacity is responsible for updating how many headers a particular peer -// is estimated to be able to retrieve in a unit time. -func (q *headerQueue) updateCapacity(peer *peerConnection, items int, span time.Duration) { - peer.UpdateHeaderRate(items, span) -} - -// reserve is responsible for allocating a requested number of pending headers -// from the download queue to the specified peer. -func (q *headerQueue) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) { - return q.queue.ReserveHeaders(peer, items), false, false -} - -// unreserve is responsible for removing the current header retrieval allocation -// assigned to a specific peer and placing it back into the pool to allow -// reassigning to some other peer. -func (q *headerQueue) unreserve(peer string) int { - fails := q.queue.ExpireHeaders(peer) - if fails > 2 { - log.Trace("Header delivery timed out", "peer", peer) - } else { - log.Debug("Header delivery stalling", "peer", peer) - } - return fails -} - -// request is responsible for converting a generic fetch request into a header -// one and sending it to the remote peer for fulfillment. -func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) { - peer.log.Trace("Requesting new batch of headers", "from", req.From) - return peer.peer.RequestHeadersByNumber(req.From, MaxHeaderFetch, 0, false, resCh) -} - -// deliver is responsible for taking a generic response packet from the concurrent -// fetcher, unpacking the header data and delivering it to the downloader's queue. 
-func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - headers := *packet.Res.(*eth.BlockHeadersRequest) - hashes := packet.Meta.([]common.Hash) - - accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh) - switch { - case err == nil && len(headers) == 0: - peer.log.Trace("Requested headers delivered") - case err == nil: - peer.log.Trace("Delivered new batch of headers", "count", len(headers), "accepted", accepted) - default: - peer.log.Debug("Failed to deliver retrieved headers", "err", err) - } - return accepted, err -} diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 6ff858d755..267c23407f 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -87,6 +87,15 @@ func newFetchResult(header *types.Header, fastSync bool) *fetchResult { return item } +// body returns a representation of the fetch result as a types.Body object. +func (f *fetchResult) body() types.Body { + return types.Body{ + Transactions: f.Transactions, + Uncles: f.Uncles, + Withdrawals: f.Withdrawals, + } +} + // SetBodyDone flags the body as finished. func (f *fetchResult) SetBodyDone() { if v := f.pending.Load(); (v & (1 << bodyType)) != 0 { diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go index 5697374ab5..123c8276e6 100644 --- a/eth/downloader/testchain_test.go +++ b/eth/downloader/testchain_test.go @@ -58,7 +58,6 @@ var pregenerated bool func init() { // Reduce some of the parameters to make the tester faster fullMaxForkAncestry = 10000 - lightMaxForkAncestry = 10000 blockCacheMaxItems = 1024 fsHeaderSafetyNet = 256 fsHeaderContCheck = 500 * time.Millisecond diff --git a/eth/gasestimator/gasestimator.go b/eth/gasestimator/gasestimator.go index f6d8358574..a9e572059d 100644 --- a/eth/gasestimator/gasestimator.go +++ b/eth/gasestimator/gasestimator.go @@ -86,6 +86,16 @@ func Estimate(ctx context.Context, call *core.Message, opts *Options, gasCap uin } available.Sub(available, call.Value) } + if opts.Config.IsCancun(opts.Header.Number, opts.Header.Time, types.DeserializeHeaderExtraInformation(opts.Header).ArbOSFormatVersion) && len(call.BlobHashes) > 0 { + blobGasPerBlob := new(big.Int).SetInt64(params.BlobTxBlobGasPerBlob) + blobBalanceUsage := new(big.Int).SetInt64(int64(len(call.BlobHashes))) + blobBalanceUsage.Mul(blobBalanceUsage, blobGasPerBlob) + blobBalanceUsage.Mul(blobBalanceUsage, call.BlobGasFeeCap) + if blobBalanceUsage.Cmp(available) >= 0 { + return 0, nil, core.ErrInsufficientFunds + } + available.Sub(available, blobBalanceUsage) + } allowance := new(big.Int).Div(available, feeCap) // If the allowance is larger than maximum uint64, skip checking diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 0410ae6b2d..d039bcb401 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -44,6 +44,7 @@ const ( // maxBlockFetchers is the max number of goroutines to spin up to pull blocks // for the fee history calculation (mostly relevant for LES). 
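One note on the gas estimator hunk above before the fee-history change: blob-carrying calls on Cancun now reserve the worst-case blob fee out of the sender's balance before the gas allowance is derived. The arithmetic is blobs times gas-per-blob times blob fee cap; a sketch (`params.BlobTxBlobGasPerBlob` is 1 << 17 per EIP-4844):

```go
import (
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

// blobFeeUsage computes the worst-case wei a transaction can spend on
// blob gas (sketch of the check added above).
func blobFeeUsage(blobs int, blobGasFeeCap *big.Int) *big.Int {
	usage := new(big.Int).SetInt64(int64(blobs))
	usage.Mul(usage, new(big.Int).SetUint64(params.BlobTxBlobGasPerBlob))
	return usage.Mul(usage, blobGasFeeCap)
}
```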
maxBlockFetchers = 4 + maxQueryLimit = 100 ) // blockFees represents a single block for processing @@ -240,6 +241,9 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks uint64, unresolvedL if len(rewardPercentiles) != 0 { maxFeeHistory = oracle.maxBlockHistory } + if len(rewardPercentiles) > maxQueryLimit { + return common.Big0, nil, nil, nil, nil, nil, fmt.Errorf("%w: over the query limit %d", errInvalidPercentile, maxQueryLimit) + } if blocks > maxFeeHistory { log.Warn("Sanitizing fee history length", "requested", blocks, "truncated", maxFeeHistory) blocks = maxFeeHistory diff --git a/eth/handler.go b/eth/handler.go index c7c582af40..143ac2a8a5 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -42,7 +42,6 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/triedb/pathdb" - "golang.org/x/crypto/sha3" ) const ( @@ -480,7 +479,7 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) { var ( signer = types.LatestSignerForChainID(h.chain.Config().ChainID) // Don't care about chain status, we just need *a* sender - hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher = crypto.NewKeccakState() hash = make([]byte, 32) ) for _, tx := range txs { diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index 96656afb1b..bdc630a9f4 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -190,7 +190,7 @@ func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHe return headers } { // Last mode: deliver ancestors of H - for i := uint64(1); header != nil && i < count; i++ { + for i := uint64(1); i < count; i++ { header = chain.GetHeaderByHash(header.ParentHash) if header == nil { break diff --git a/eth/protocols/snap/gentrie.go b/eth/protocols/snap/gentrie.go index ca094f5a2d..6255fb221d 100644 --- a/eth/protocols/snap/gentrie.go +++ b/eth/protocols/snap/gentrie.go @@ -132,6 +132,7 @@ func (t *pathTrie) onTrieNode(path []byte, hash common.Hash, blob []byte) { // // The extension node is detected if its path is the prefix of last committed // one and path gap is larger than one. If the path gap is only one byte, + // the current node could either be a full node, or an extension with single // byte key. In either case, no gaps will be left in the path. 
if t.last != nil && bytes.HasPrefix(t.last, path) && len(t.last)-len(path) > 1 { for i := len(path) + 1; i < len(t.last); i++ { @@ -163,7 +164,7 @@ func (t *pathTrie) deleteAccountNode(path []byte, inner bool) { } else { accountOuterLookupGauge.Inc(1) } - if !rawdb.ExistsAccountTrieNode(t.db, path) { + if !rawdb.HasAccountTrieNode(t.db, path) { return } if inner { @@ -180,7 +181,7 @@ func (t *pathTrie) deleteStorageNode(path []byte, inner bool) { } else { storageOuterLookupGauge.Inc(1) } - if !rawdb.ExistsStorageTrieNode(t.db, t.owner, path) { + if !rawdb.HasStorageTrieNode(t.db, t.owner, path) { return } if inner { diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index b0ddb8e403..ffda718700 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -42,7 +42,6 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" - "golang.org/x/crypto/sha3" ) const ( @@ -2653,7 +2652,7 @@ func (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error // Cross reference the requested bytecodes with the response to find gaps // that the serving node is missing - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() hash := make([]byte, 32) codes := make([][]byte, len(req.hashes)) @@ -2901,7 +2900,7 @@ func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error // Cross reference the requested trienodes with the response to find gaps // that the serving node is missing var ( - hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher = crypto.NewKeccakState() hash = make([]byte, 32) nodes = make([][]byte, len(req.hashes)) fills uint64 @@ -3007,7 +3006,7 @@ func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) e // Cross reference the requested bytecodes with the response to find gaps // that the serving node is missing - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() hash := make([]byte, 32) codes := make([][]byte, len(req.hashes)) diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index f35babb731..5f6826373a 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -64,7 +64,7 @@ func TestHashing(t *testing.T) { } } var new = func() { - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() var hash = make([]byte, 32) for i := 0; i < len(bytecodes); i++ { hasher.Reset() @@ -96,7 +96,7 @@ func BenchmarkHashing(b *testing.B) { } } var new = func() { - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() var hash = make([]byte, 32) for i := 0; i < len(bytecodes); i++ { hasher.Reset() diff --git a/eth/tracers/native/call_flat.go b/eth/tracers/native/call_flat.go index 36fbdf23a9..1815b83c6a 100644 --- a/eth/tracers/native/call_flat.go +++ b/eth/tracers/native/call_flat.go @@ -167,6 +167,9 @@ func newFlatCallTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Trac // OnEnter is called when EVM enters a new scope (via call, create or selfdestruct). 
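The hasher call sites above all swap the `sha3.NewLegacyKeccak256().(crypto.KeccakState)` type assertion for `crypto.NewKeccakState()`, which returns the read-optimised state directly. Usage is unchanged; roughly:

```go
// Hash many items while reusing one Keccak state (sketch).
hasher := crypto.NewKeccakState()
hash := make([]byte, 32)
for _, code := range bytecodes {
	hasher.Reset()
	hasher.Write(code)
	hasher.Read(hash) // Read avoids the state copy that Sum performs
}
```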
func (t *flatCallTracer) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + if t.interrupt.Load() { + return + } t.tracer.OnEnter(depth, typ, from, to, input, gas, value) if depth == 0 { @@ -182,6 +185,9 @@ func (t *flatCallTracer) OnEnter(depth int, typ byte, from common.Address, to co // OnExit is called when EVM exits a scope, even if the scope didn't // execute any code. func (t *flatCallTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + if t.interrupt.Load() { + return + } t.tracer.OnExit(depth, output, gasUsed, err, reverted) if depth == 0 { @@ -207,6 +213,9 @@ func (t *flatCallTracer) OnExit(depth int, output []byte, gasUsed uint64, err er } func (t *flatCallTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from common.Address) { + if t.interrupt.Load() { + return + } t.tracer.OnTxStart(env, tx, from) // Update list of precompiles based on current block rules := env.ChainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time, env.ArbOSVersion) @@ -214,6 +223,9 @@ func (t *flatCallTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction } func (t *flatCallTracer) OnTxEnd(receipt *types.Receipt, err error) { + if t.interrupt.Load() { + return + } t.tracer.OnTxEnd(receipt, err) } diff --git a/eth/tracers/native/call_flat_test.go b/eth/tracers/native/call_flat_test.go new file mode 100644 index 0000000000..d5481b868b --- /dev/null +++ b/eth/tracers/native/call_flat_test.go @@ -0,0 +1,64 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+ +package native_test + +import ( + "errors" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/require" +) + +func TestCallFlatStop(t *testing.T) { + tracer, err := tracers.DefaultDirectory.New("flatCallTracer", &tracers.Context{}, nil) + require.NoError(t, err) + + // this error should be returned by GetResult + stopError := errors.New("stop error") + + // simulate a transaction + tx := types.NewTx(&types.LegacyTx{ + Nonce: 0, + To: &common.Address{}, + Value: big.NewInt(0), + Gas: 0, + GasPrice: big.NewInt(0), + Data: nil, + }) + + tracer.OnTxStart(&tracing.VMContext{ + ChainConfig: params.MainnetChainConfig, + }, tx, common.Address{}) + + tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, nil, 0, big.NewInt(0)) + + // stop before the transaction is finished + tracer.Stop(stopError) + + tracer.OnTxEnd(&types.Receipt{GasUsed: 0}, nil) + + // check that the error is returned by GetResult + _, tracerError := tracer.GetResult() + require.Equal(t, stopError, tracerError) +} diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index e14e224025..225b4a2d30 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -191,7 +191,12 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface } txs[i] = tx.tx } - return types.NewBlockWithHeader(head).WithBody(txs, uncles).WithWithdrawals(body.Withdrawals), nil + return types.NewBlockWithHeader(head).WithBody( + types.Body{ + Transactions: txs, + Uncles: uncles, + Withdrawals: body.Withdrawals, + }), nil } // HeaderByHash returns the block header with the given hash. diff --git a/ethdb/database.go b/ethdb/database.go index f8e9be0ca3..de65a0371f 100644 --- a/ethdb/database.go +++ b/ethdb/database.go @@ -88,8 +88,8 @@ type AncientReaderOp interface { // Ancients returns the ancient item numbers in the ancient store. Ancients() (uint64, error) - // Tail returns the number of first stored item in the freezer. - // This number can also be interpreted as the total deleted item numbers. + // Tail returns the number of the first stored item in the ancient store. + // This number can also be interpreted as the total number of deleted items. Tail() (uint64, error) // AncientSize returns the ancient size of the specified category. @@ -101,7 +101,7 @@ type AncientReader interface { AncientReaderOp // ReadAncients runs the given read operation while ensuring that no writes take place - // on the underlying freezer. + // on the underlying ancient store. ReadAncients(fn func(AncientReaderOp) error) (err error) } @@ -141,11 +141,15 @@ type AncientWriteOp interface { AppendRaw(kind string, number uint64, item []byte) error } -// AncientStater wraps the Stat method of a backing data store. +// AncientStater wraps the Stat method of a backing ancient store. type AncientStater interface { - // AncientDatadir returns the path of root ancient directory. Empty string - // will be returned if ancient store is not enabled at all. The returned - // path can be used to construct the path of other freezers. + // AncientDatadir returns the path of the ancient store directory. + // + // If the ancient store is not activated, an error is returned. + // If an ephemeral ancient store is used, an empty path is returned.
+ // + // The path returned by AncientDatadir can be used as the root path + // of the ancient store to construct paths for other sub ancient stores. AncientDatadir() (string, error) } @@ -171,7 +175,7 @@ type Stater interface { } // AncientStore contains all the methods required to allow handling different -// ancient data stores backing immutable chain data store. +// ancient data stores backing the immutable data store. type AncientStore interface { AncientReader AncientWriter @@ -182,8 +186,16 @@ type WasmDataBaseRetriever interface { WasmDataBase() (KeyValueStore, uint32) } +// ResettableAncientStore extends the AncientStore interface by adding a Reset method. +type ResettableAncientStore interface { + AncientStore + + // Reset is designed to reset the entire ancient store to its default state. + Reset() error +} + // Database contains all the methods required by the high level database to not -// only access the key-value data store but also the chain freezer. +// only access the key-value data store but also the ancient chain store. type Database interface { Reader Writer diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index 6b529d82bc..bb60427652 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -534,16 +534,12 @@ func (d *Database) meter(refresh time.Duration, namespace string) { // Create storage and warning log tracer for write delay. var ( - compTimes [2]int64 - writeDelayTimes [2]int64 - writeDelayCounts [2]int64 - compWrites [2]int64 - compReads [2]int64 + compTimes [2]int64 + compWrites [2]int64 + compReads [2]int64 nWrites [2]int64 - lastWriteStallReport time.Time - commitCounts [2]int64 commitTotalDurations [2]int64 commitSemaphoreWaits [2]int64 @@ -551,6 +547,9 @@ func (d *Database) meter(refresh time.Duration, namespace string) { commitL0ReadAmpWriteStalls [2]int64 commitWALRotations [2]int64 commitWaits [2]int64 + writeDelayTimes [2]int64 + writeDelayCounts [2]int64 + lastWriteStallReport time.Time ) // Iterate ad infinitum and collect the stats diff --git a/internal/era/era.go b/internal/era/era.go index 2b9e622901..6ad7339b36 100644 --- a/internal/era/era.go +++ b/internal/era/era.go @@ -151,7 +151,7 @@ func (e *Era) GetBlockByNumber(num uint64) (*types.Block, error) { if err := rlp.Decode(r, &body); err != nil { return nil, err } - return types.NewBlockWithHeader(&header).WithBody(body.Transactions, body.Uncles), nil + return types.NewBlockWithHeader(&header).WithBody(body), nil } // Accumulator reads the accumulator entry in the Era1 file. diff --git a/internal/era/iterator.go b/internal/era/iterator.go index cc4f27c201..f48aab46b4 100644 --- a/internal/era/iterator.go +++ b/internal/era/iterator.go @@ -73,7 +73,7 @@ func (it *Iterator) Block() (*types.Block, error) { if err := rlp.Decode(it.inner.Body, &body); err != nil { return nil, err } - return types.NewBlockWithHeader(&header).WithBody(body.Transactions, body.Uncles), nil + return types.NewBlockWithHeader(&header).WithBody(body), nil } // Receipts returns the receipts for the iterator's current position. diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 2a95371033..d0e2f723b3 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -150,7 +150,7 @@ func (s *EthereumAPI) BlobBaseFee(ctx context.Context) *hexutil.Big { } // Syncing returns false in case the node is currently not syncing with the network. It can be up-to-date or has not
In case it is synchronizing: +// yet received the latest block headers from its peers. In case it is synchronizing: // - startingBlock: block number this node started to synchronize from // - currentBlock: block number this node is currently importing // - highestBlock: block number of the highest block header this node has received from peers @@ -1742,6 +1742,9 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH prevTracer = logger.NewAccessListTracer(*args.AccessList, args.from(), to, precompiles) } for { + if err := ctx.Err(); err != nil { + return nil, 0, nil, err + } // Retrieve the current access list to expand accessList := prevTracer.AccessList() log.Trace("Creating access list", "input", accessList) diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 5863c465ba..49d50658f1 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -1357,7 +1357,7 @@ func TestRPCMarshalBlock(t *testing.T) { } txs = append(txs, tx) } - block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, txs, nil, nil, blocktest.NewHasher()) + block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, &types.Body{Transactions: txs}, nil, blocktest.NewHasher()) var testSuite = []struct { inclTx bool @@ -1568,7 +1568,7 @@ func TestRPCGetBlockOrHeader(t *testing.T) { Address: common.Address{0x12, 0x34}, Amount: 10, } - pending = types.NewBlockWithWithdrawals(&types.Header{Number: big.NewInt(11), Time: 42}, []*types.Transaction{tx}, nil, nil, []*types.Withdrawal{withdrawal}, blocktest.NewHasher()) + pending = types.NewBlock(&types.Header{Number: big.NewInt(11), Time: 42}, &types.Body{Transactions: types.Transactions{tx}, Withdrawals: types.Withdrawals{withdrawal}}, nil, blocktest.NewHasher()) ) backend := newTestBackend(t, genBlocks, genesis, ethash.NewFaker(), func(i int, b *core.BlockGen) { // Transfer from account[0] to account[1] diff --git a/miner/miner_test.go b/miner/miner_test.go index 64a54eaf5f..4c867a24a5 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -78,7 +78,7 @@ func (bc *testBlockChain) CurrentBlock() *types.Header { } func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { - return types.NewBlock(bc.CurrentBlock(), nil, nil, nil, trie.NewStackTrie(nil)) + return types.NewBlock(bc.CurrentBlock(), nil, nil, trie.NewStackTrie(nil)) } func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { diff --git a/node/node.go b/node/node.go index 9a3a50e1c1..0310a29168 100644 --- a/node/node.go +++ b/node/node.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/ethdb/pebble" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" @@ -794,7 +795,7 @@ func (n *Node) OpenDatabaseWithFreezerWithExtraOptions(name string, cache, handl var db ethdb.Database var err error if n.config.DataDir == "" { - db = rawdb.NewMemoryDatabase() + db, err = rawdb.NewDatabaseWithFreezer(memorydb.New(), "", namespace, readonly) } else { db, err = rawdb.Open(rawdb.OpenOptions{ Type: n.config.DBEngine, diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index 44b1f5305c..7a0a0f1c77 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -548,7 +548,7 @@ func (t *UDPv4) handlePacket(from *net.UDPAddr, buf []byte) error { } packet := t.wrapPacket(rawpacket) 
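	// Note (review): err at this point is the result of v4wire.Decode above,
	// and decode failures already return early, so the old "err == nil &&"
	// half of the guard below was dead code.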
fromID := fromKey.ID() - if err == nil && packet.preverify != nil { + if packet.preverify != nil { err = packet.preverify(packet, from, fromID, fromKey) } t.log.Trace("<< "+packet.Name(), "id", fromID, "addr", from, "err", err) diff --git a/p2p/discover/v5wire/encoding_test.go b/p2p/discover/v5wire/encoding_test.go index a5387311a5..27966f2afc 100644 --- a/p2p/discover/v5wire/encoding_test.go +++ b/p2p/discover/v5wire/encoding_test.go @@ -30,6 +30,7 @@ import ( "testing" "github.com/davecgh/go-spew/spew" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/crypto" @@ -283,9 +284,38 @@ func TestDecodeErrorsV5(t *testing.T) { b = make([]byte, 63) net.nodeA.expectDecodeErr(t, errInvalidHeader, b) - // TODO some more tests would be nice :) - // - check invalid authdata sizes - // - check invalid handshake data sizes + t.Run("invalid-handshake-datasize", func(t *testing.T) { + requiredNumber := 108 + + testDataFile := filepath.Join("testdata", "v5.1-ping-handshake"+".txt") + enc := hexFile(testDataFile) + // delete some bytes from the handshake to make it invalid + enc = enc[:len(enc)-requiredNumber] + net.nodeB.expectDecodeErr(t, errMsgTooShort, enc) + }) + + t.Run("invalid-auth-datasize", func(t *testing.T) { + testPacket := []byte{} + testDataFiles := []string{"v5.1-whoareyou", "v5.1-ping-handshake"} + for counter, name := range testDataFiles { + file := filepath.Join("testdata", name+".txt") + enc := hexFile(file) + if counter == 0 { + // make whoareyou header + testPacket = enc[:sizeofStaticPacketData-1] + testPacket = append(testPacket, 255) + } + if counter == 1 { + // append invalid auth size + testPacket = append(testPacket, enc[sizeofStaticPacketData:]...) + } + } + + wantErr := "invalid auth size" + if _, err := net.nodeB.decode(testPacket); !strings.HasSuffix(err.Error(), wantErr) { + t.Fatal(fmt.Errorf("(%s) got err %q, want %q", net.nodeB.ln.ID().TerminalString(), err, wantErr)) + } + }) } // This test checks that all test vectors can be decoded. diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go index 349e496b2f..0efe9744a5 100644 --- a/p2p/simulations/adapters/inproc.go +++ b/p2p/simulations/adapters/inproc.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "maps" "math" "net" "sync" @@ -215,10 +216,7 @@ func (sn *SimNode) ServeRPC(conn *websocket.Conn) error { // simulation_snapshot RPC method func (sn *SimNode) Snapshots() (map[string][]byte, error) { sn.lock.RLock() - services := make(map[string]node.Lifecycle, len(sn.running)) - for name, service := range sn.running { - services[name] = service - } + services := maps.Clone(sn.running) sn.lock.RUnlock() if len(services) == 0 { return nil, errors.New("no running services") @@ -315,11 +313,7 @@ func (sn *SimNode) Services() []node.Lifecycle { func (sn *SimNode) ServiceMap() map[string]node.Lifecycle { sn.lock.RLock() defer sn.lock.RUnlock() - services := make(map[string]node.Lifecycle, len(sn.running)) - for name, service := range sn.running { - services[name] = service - } - return services + return maps.Clone(sn.running) } // Server returns the underlying p2p.Server diff --git a/params/config.go b/params/config.go index 865ec4248e..c39766359e 100644 --- a/params/config.go +++ b/params/config.go @@ -380,7 +380,7 @@ type ChainConfig struct { type EthashConfig struct{} // String implements the stringer interface, returning the consensus engine details.
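// Note (review): with a value receiver, String is in the method set of both
// EthashConfig and *EthashConfig, so fmt verbs like %v now reach the Stringer
// even when the config is stored or printed by value.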
-func (c *EthashConfig) String() string { +func (c EthashConfig) String() string { return "ethash" } @@ -391,8 +391,8 @@ type CliqueConfig struct { } // String implements the stringer interface, returning the consensus engine details. -func (c *CliqueConfig) String() string { - return "clique" +func (c CliqueConfig) String() string { + return fmt.Sprintf("clique(period: %d, epoch: %d)", c.Period, c.Epoch) } // Description returns a human-readable description of ChainConfig. @@ -587,12 +587,12 @@ func (c *ChainConfig) IsCancun(num *big.Int, time uint64, currentArbosVersion ui return c.IsLondon(num) && isTimestampForked(c.CancunTime, time) } -// IsPrague returns whether num is either equal to the Prague fork time or greater. +// IsPrague returns whether time is either equal to the Prague fork time or greater. func (c *ChainConfig) IsPrague(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.PragueTime, time) } -// IsVerkle returns whether num is either equal to the Verkle fork time or greater. +// IsVerkle returns whether time is either equal to the Verkle fork time or greater. func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time) } @@ -900,7 +900,7 @@ func newTimestampCompatError(what string, storedtime, newtime *uint64) *ConfigCo NewTime: newtime, RewindToTime: 0, } - if rew != nil { + if rew != nil && *rew != 0 { err.RewindToTime = *rew - 1 } return err @@ -910,7 +910,15 @@ func (err *ConfigCompatError) Error() string { if err.StoredBlock != nil { return fmt.Sprintf("mismatching %s in database (have block %d, want block %d, rewindto block %d)", err.What, err.StoredBlock, err.NewBlock, err.RewindToBlock) } - return fmt.Sprintf("mismatching %s in database (have timestamp %d, want timestamp %d, rewindto timestamp %d)", err.What, err.StoredTime, err.NewTime, err.RewindToTime) + + if err.StoredTime == nil && err.NewTime == nil { + return "" + } else if err.StoredTime == nil && err.NewTime != nil { + return fmt.Sprintf("mismatching %s in database (have timestamp nil, want timestamp %d, rewindto timestamp %d)", err.What, *err.NewTime, err.RewindToTime) + } else if err.StoredTime != nil && err.NewTime == nil { + return fmt.Sprintf("mismatching %s in database (have timestamp %d, want timestamp nil, rewindto timestamp %d)", err.What, *err.StoredTime, err.RewindToTime) + } + return fmt.Sprintf("mismatching %s in database (have timestamp %d, want timestamp %d, rewindto timestamp %d)", err.What, *err.StoredTime, *err.NewTime, err.RewindToTime) } // Rules wraps ChainConfig and is merely syntactic sugar or can be used for functions diff --git a/params/config_test.go b/params/config_test.go index 1d03d96739..ee91615463 100644 --- a/params/config_test.go +++ b/params/config_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common/math" + "github.com/stretchr/testify/require" ) func TestCheckCompatible(t *testing.T) { @@ -140,3 +141,20 @@ func TestConfigRules(t *testing.T) { t.Errorf("expected %v to be shanghai", currentArbosVersion) } } + +func TestTimestampCompatError(t *testing.T) { + require.Equal(t, new(ConfigCompatError).Error(), "") + + errWhat := "Shanghai fork timestamp" + require.Equal(t, newTimestampCompatError(errWhat, nil, newUint64(1681338455)).Error(), + "mismatching Shanghai fork timestamp in database (have timestamp nil, want timestamp 1681338455, rewindto timestamp 1681338454)") + + require.Equal(t, newTimestampCompatError(errWhat, newUint64(1681338455), 
nil).Error(), + "mismatching Shanghai fork timestamp in database (have timestamp 1681338455, want timestamp nil, rewindto timestamp 1681338454)") + + require.Equal(t, newTimestampCompatError(errWhat, newUint64(1681338455), newUint64(600624000)).Error(), + "mismatching Shanghai fork timestamp in database (have timestamp 1681338455, want timestamp 600624000, rewindto timestamp 600623999)") + + require.Equal(t, newTimestampCompatError(errWhat, newUint64(0), newUint64(1681338455)).Error(), + "mismatching Shanghai fork timestamp in database (have timestamp 0, want timestamp 1681338455, rewindto timestamp 0)") +} diff --git a/params/version.go b/params/version.go index b3978be046..ac4f930c6f 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 14 // Minor version component of the current release - VersionPatch = 0 // Patch version component of the current release + VersionPatch = 2 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) diff --git a/trie/hasher.go b/trie/hasher.go index 1e063d8020..abf654c709 100644 --- a/trie/hasher.go +++ b/trie/hasher.go @@ -21,7 +21,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/crypto/sha3" ) // hasher is a type used for the trie Hash operation. A hasher has some @@ -38,7 +37,7 @@ var hasherPool = sync.Pool{ New: func() interface{} { return &hasher{ tmp: make([]byte, 0, 550), // cap is as large as a full fullNode. - sha: sha3.NewLegacyKeccak256().(crypto.KeccakState), + sha: crypto.NewKeccakState(), encbuf: rlp.NewEncoderBuffer(nil), } }, diff --git a/trie/stacktrie_fuzzer_test.go b/trie/stacktrie_fuzzer_test.go index 5126e0bd07..418b941d94 100644 --- a/trie/stacktrie_fuzzer_test.go +++ b/trie/stacktrie_fuzzer_test.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/trie/trienode" - "golang.org/x/crypto/sha3" ) func FuzzStackTrie(f *testing.F) { @@ -41,10 +40,10 @@ func fuzz(data []byte, debugging bool) { // This spongeDb is used to check the sequence of disk-db-writes var ( input = bytes.NewReader(data) - spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()} + spongeA = &spongeDb{sponge: crypto.NewKeccakState()} dbA = newTestDatabase(rawdb.NewDatabase(spongeA), rawdb.HashScheme) trieA = NewEmpty(dbA) - spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()} + spongeB = &spongeDb{sponge: crypto.NewKeccakState()} dbB = newTestDatabase(rawdb.NewDatabase(spongeB), rawdb.HashScheme) trieB = NewStackTrie(func(path []byte, hash common.Hash, blob []byte) { rawdb.WriteTrieNode(spongeB, common.Hash{}, path, hash, blob, dbB.Scheme()) diff --git a/trie/sync.go b/trie/sync.go index 589d28364b..f6b20b2240 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -546,9 +547,9 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) { // the performance impact negligible. 
var exists bool if owner == (common.Hash{}) { - exists = rawdb.ExistsAccountTrieNode(s.database, append(inner, key[:i]...)) + exists = rawdb.HasAccountTrieNode(s.database, append(inner, key[:i]...)) } else { - exists = rawdb.ExistsStorageTrieNode(s.database, owner, append(inner, key[:i]...)) + exists = rawdb.HasStorageTrieNode(s.database, owner, append(inner, key[:i]...)) } if exists { s.membatch.delNode(owner, append(inner, key[:i]...)) @@ -691,13 +692,14 @@ func (s *Sync) hasNode(owner common.Hash, path []byte, hash common.Hash) (exists } // If node is running with path scheme, check the presence with node path. var blob []byte - var dbHash common.Hash if owner == (common.Hash{}) { - blob, dbHash = rawdb.ReadAccountTrieNode(s.database, path) + blob = rawdb.ReadAccountTrieNode(s.database, path) } else { - blob, dbHash = rawdb.ReadStorageTrieNode(s.database, owner, path) + blob = rawdb.ReadStorageTrieNode(s.database, owner, path) } - exists = hash == dbHash + h := newBlobHasher() + defer h.release() + exists = hash == h.hash(blob) inconsistent = !exists && len(blob) != 0 return exists, inconsistent } @@ -712,3 +714,23 @@ func ResolvePath(path []byte) (common.Hash, []byte) { } return owner, path } + +// blobHasher is used to compute the keccak256 hash of the provided data. +type blobHasher struct{ state crypto.KeccakState } + +// blobHasherPool is the pool for reusing pre-allocated hash state. +var blobHasherPool = sync.Pool{ + New: func() interface{} { return &blobHasher{state: crypto.NewKeccakState()} }, +} + +func newBlobHasher() *blobHasher { + return blobHasherPool.Get().(*blobHasher) +} + +func (h *blobHasher) hash(data []byte) common.Hash { + return crypto.HashData(h.state, data) +} + +func (h *blobHasher) release() { + blobHasherPool.Put(h) +} diff --git a/trie/tracer.go b/trie/tracer.go index 5786af4d3e..90b9666f0b 100644 --- a/trie/tracer.go +++ b/trie/tracer.go @@ -17,6 +17,8 @@ package trie import ( + "maps" + "github.com/ethereum/go-ethereum/common" ) @@ -92,23 +94,13 @@ func (t *tracer) reset() { // copy returns a deep copied tracer instance.
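// Note (review): inserts and deletes are map[string]struct{}, so the shallow
// maps.Clone used below already deep-copies them; only the accessList values
// are []byte and still need an explicit common.CopyBytes per entry.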
func (t *tracer) copy() *tracer { - var ( - inserts = make(map[string]struct{}) - deletes = make(map[string]struct{}) - accessList = make(map[string][]byte) - ) - for path := range t.inserts { - inserts[path] = struct{}{} - } - for path := range t.deletes { - deletes[path] = struct{}{} - } + accessList := make(map[string][]byte, len(t.accessList)) for path, blob := range t.accessList { accessList[path] = common.CopyBytes(blob) } return &tracer{ - inserts: inserts, - deletes: deletes, + inserts: maps.Clone(t.inserts), + deletes: maps.Clone(t.deletes), accessList: accessList, } } diff --git a/trie/trie_test.go b/trie/trie_test.go index 6ecd20c218..da60a7423d 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -886,7 +886,7 @@ func TestCommitSequence(t *testing.T) { } { addresses, accounts := makeAccounts(tc.count) // This spongeDb is used to check the sequence of disk-db-writes - s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} + s := &spongeDb{sponge: crypto.NewKeccakState()} db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme) trie := NewEmpty(db) // Fill the trie with elements @@ -917,7 +917,7 @@ func TestCommitSequenceRandomBlobs(t *testing.T) { } { prng := rand.New(rand.NewSource(int64(i))) // This spongeDb is used to check the sequence of disk-db-writes - s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} + s := &spongeDb{sponge: crypto.NewKeccakState()} db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme) trie := NewEmpty(db) // Fill the trie with elements diff --git a/trie/trienode/node.go b/trie/trienode/node.go index 95315c2e9a..aa8a0f6d99 100644 --- a/trie/trienode/node.go +++ b/trie/trienode/node.go @@ -78,7 +78,7 @@ func NewNodeSet(owner common.Hash) *NodeSet { // ForEachWithOrder iterates the nodes with the order from bottom to top, // right to left, nodes with the longest path will be iterated first. func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) { - var paths []string + paths := make([]string, 0, len(set.Nodes)) for path := range set.Nodes { paths = append(paths, path) } @@ -114,7 +114,12 @@ func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error { set.updates -= 1 } } - set.AddNode([]byte(path), node) + if node.IsDeleted() { + set.deletes += 1 + } else { + set.updates += 1 + } + set.Nodes[path] = node } return nil } @@ -130,16 +135,6 @@ func (set *NodeSet) Size() (int, int) { return set.updates, set.deletes } -// Hashes returns the hashes of all updated nodes. TODO(rjl493456442) how can -// we get rid of it? -func (set *NodeSet) Hashes() []common.Hash { - var ret []common.Hash - for _, node := range set.Nodes { - ret = append(ret, node.Hash) - } - return ret -} - // Summary returns a string-representation of the NodeSet. func (set *NodeSet) Summary() string { var out = new(strings.Builder) diff --git a/trie/trienode/node_test.go b/trie/trienode/node_test.go new file mode 100644 index 0000000000..bcb3a2202b --- /dev/null +++ b/trie/trienode/node_test.go @@ -0,0 +1,61 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package trienode + +import ( + "crypto/rand" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +func BenchmarkMerge(b *testing.B) { + b.Run("1K", func(b *testing.B) { + benchmarkMerge(b, 1000) + }) + b.Run("10K", func(b *testing.B) { + benchmarkMerge(b, 10_000) + }) +} + +func benchmarkMerge(b *testing.B, count int) { + x := NewNodeSet(common.Hash{}) + y := NewNodeSet(common.Hash{}) + addNode := func(s *NodeSet) { + path := make([]byte, 4) + rand.Read(path) + blob := make([]byte, 32) + rand.Read(blob) + hash := crypto.Keccak256Hash(blob) + s.AddNode(path, New(hash, blob)) + } + for i := 0; i < count; i++ { + // Random path of 4 nibbles + addNode(x) + addNode(y) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + // Store set x into a backup + z := NewNodeSet(common.Hash{}) + z.Merge(common.Hash{}, x.Nodes) + // Merge y into x + x.Merge(common.Hash{}, y.Nodes) + x = z + } +} diff --git a/trie/triestate/state.go b/trie/triestate/state.go index aa4d32f852..9db9211e8c 100644 --- a/trie/triestate/state.go +++ b/trie/triestate/state.go @@ -26,7 +26,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie/trienode" - "golang.org/x/crypto/sha3" ) // Trie is an Ethereum state trie, can be implemented by Ethereum Merkle Patricia // tree or Verkle tree, implemented by different structures. @@ -257,7 +256,7 @@ func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error { type hasher struct{ sha crypto.KeccakState } var hasherPool = sync.Pool{ - New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, + New: func() interface{} { return &hasher{sha: crypto.NewKeccakState()} }, } func newHasher() *hasher { diff --git a/triedb/database.go b/triedb/database.go index 81468b3a8d..c9863bf1e7 100644 --- a/triedb/database.go +++ b/triedb/database.go @@ -20,6 +20,7 @@ import ( "errors", "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie" @@ -48,9 +49,6 @@ var HashDefaults = &Config{ // backend defines the methods needed to access/update trie nodes in different // state scheme. type backend interface { - // Scheme returns the identifier of used storage scheme. - Scheme() string - // Initialized returns an indicator if the state data is already initialized // according to the state scheme. Initialized(genesisRoot common.Hash) bool @@ -181,7 +179,10 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool { // Scheme returns the node scheme used in the database. func (db *Database) Scheme() string { + if db.config.PathDB != nil { + return rawdb.PathScheme + } + return rawdb.HashScheme } // Close flushes the dangling preimages to disk and closes the trie database.
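With Scheme removed from the internal backend interface (see the hashdb and pathdb hunks below), the node scheme becomes a pure function of the configuration. A minimal sketch of the resulting behaviour, assuming the standard constructors and in-memory databases; this is a reviewer illustration, not part of the diff:

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/triedb"
		"github.com/ethereum/go-ethereum/triedb/pathdb"
	)

	func main() {
		// A non-nil PathDB config selects the path scheme...
		pdb := triedb.NewDatabase(rawdb.NewMemoryDatabase(), &triedb.Config{PathDB: pathdb.Defaults})
		fmt.Println(pdb.Scheme() == rawdb.PathScheme) // true

		// ...anything else resolves to the hash scheme.
		hdb := triedb.NewDatabase(rawdb.NewMemoryDatabase(), triedb.HashDefaults)
		fmt.Println(hdb.Scheme() == rawdb.HashScheme) // true
	}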
diff --git a/triedb/database/database.go b/triedb/database/database.go index 18a8f454e2..f11c7e9bbd 100644 --- a/triedb/database/database.go +++ b/triedb/database/database.go @@ -25,6 +25,9 @@ type Reader interface { // Node retrieves the trie node blob with the provided trie identifier, // node path and the corresponding node hash. No error will be returned // if the node is not found. + // + // Don't modify the returned byte slice since it's not deep-copied and + // is still referenced by the database. Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) } diff --git a/triedb/hashdb/database.go b/triedb/hashdb/database.go index ef69caf009..bb028bf59a 100644 --- a/triedb/hashdb/database.go +++ b/triedb/hashdb/database.go @@ -632,11 +632,6 @@ func (db *Database) Close() error { return nil } -// Scheme returns the node scheme used in the database. -func (db *Database) Scheme() string { - return rawdb.HashScheme -} - // Reader retrieves a node reader belonging to the given state root. // An error will be returned if the requested state is not available. func (db *Database) Reader(root common.Hash) (*reader, error) { diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go index 18f2eeef00..05a28aa1ef 100644 --- a/triedb/pathdb/database.go +++ b/triedb/pathdb/database.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -131,15 +132,15 @@ type Database struct { // readOnly is the flag whether the mutation is allowed to be applied. // It will be set automatically when the database is journaled during // the shutdown to reject all following unexpected mutations. - readOnly bool // Flag if database is opened in read only mode - waitSync bool // Flag if database is deactivated due to initial state sync - isVerkle bool // Flag if database is used for verkle tree - bufferSize int // Memory allowance (in bytes) for caching dirty nodes - config *Config // Configuration for database - diskdb ethdb.Database // Persistent storage for matured trie nodes - tree *layerTree // The group for all known layers - freezer *rawdb.ResettableFreezer // Freezer for storing trie histories, nil possible in tests - lock sync.RWMutex // Lock to prevent mutations from happening at the same time + readOnly bool // Flag if database is opened in read only mode + waitSync bool // Flag if database is deactivated due to initial state sync + isVerkle bool // Flag if database is used for verkle tree + bufferSize int // Memory allowance (in bytes) for caching dirty nodes + config *Config // Configuration for database + diskdb ethdb.Database // Persistent storage for matured trie nodes + tree *layerTree // The group for all known layers + freezer ethdb.ResettableAncientStore // Freezer for storing trie histories, nil possible in tests + lock sync.RWMutex // Lock to prevent mutations from happening at the same time } // New attempts to load an already existing layer from a persistent key-value // store (with a number of memory layers from a journal). If the journal is not // matched with the base persistent layer, all the recorded diff layers are discarded. @@ -162,45 +163,10 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database { // and in-memory layer journal. db.tree = newLayerTree(db.loadLayers()) - // Open the freezer for state history if the passed database contains an - // ancient store. Otherwise, all the relevant functionalities are disabled.
- // - // Because the freezer can only be opened once at the same time, this - // mechanism also ensures that at most one **non-readOnly** database - // is opened at the same time to prevent accidental mutation. - if ancient, err := diskdb.AncientDatadir(); err == nil && ancient != "" && !db.readOnly { - freezer, err := rawdb.NewStateFreezer(ancient, false) - if err != nil { - log.Crit("Failed to open state history freezer", "err", err) - } - db.freezer = freezer - - diskLayerID := db.tree.bottom().stateID() - if diskLayerID == 0 { - // Reset the entire state histories in case the trie database is - // not initialized yet, as these state histories are not expected. - frozen, err := db.freezer.Ancients() - if err != nil { - log.Crit("Failed to retrieve head of state history", "err", err) - } - if frozen != 0 { - err := db.freezer.Reset() - if err != nil { - log.Crit("Failed to reset state histories", "err", err) - } - log.Info("Truncated extraneous state history") - } - } else { - // Truncate the extra state histories above in freezer in case - // it's not aligned with the disk layer. - pruned, err := truncateFromHead(db.diskdb, freezer, diskLayerID) - if err != nil { - log.Crit("Failed to truncate extra state histories", "err", err) - } - if pruned != 0 { - log.Warn("Truncated extra state histories", "number", pruned) - } - } + // Repair the state history, which might not be aligned with the state + // in the key-value store due to an unclean shutdown. + if err := db.repairHistory(); err != nil { + log.Crit("Failed to repair pathdb", "err", err) } // Disable database in case node is still in the initial state sync stage. if rawdb.ReadSnapSyncStatusFlag(diskdb) == rawdb.StateSyncRunning && !db.readOnly { @@ -211,6 +177,55 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database { return db } +// repairHistory truncates leftover state history objects, which may occur due +// to an unclean shutdown or other unexpected reasons. +func (db *Database) repairHistory() error { + // Open the freezer for state history. This mechanism ensures that + // only one database instance can be opened at a time to prevent + // accidental mutation. + ancient, err := db.diskdb.AncientDatadir() + if err != nil { + // TODO error out if ancient store is disabled. Tons of unit tests + // disable the ancient store, thus the error here would immediately fail + // all of them. Fix the tests first. + return nil + } + freezer, err := rawdb.NewStateFreezer(ancient, false) + if err != nil { + log.Crit("Failed to open state history freezer", "err", err) + } + db.freezer = freezer + + // Reset the entire state histories if the trie database is not initialized + // yet. This action is necessary because these state histories are not + // expected to exist without an initialized trie database. + id := db.tree.bottom().stateID() + if id == 0 { + frozen, err := db.freezer.Ancients() + if err != nil { + log.Crit("Failed to retrieve head of state history", "err", err) + } + if frozen != 0 { + err := db.freezer.Reset() + if err != nil { + log.Crit("Failed to reset state histories", "err", err) + } + log.Info("Truncated extraneous state history") + } + return nil + } + // Truncate the extra state histories above in the freezer in case it's not + // aligned with the disk layer. This might happen after an unclean shutdown.
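	// (Illustrative: with the disk layer at state id 100 and the freezer
	// holding histories up to id 105, entries 101-105 are pruned here so the
	// history store lines up with the persisted state again.)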
+ pruned, err := truncateFromHead(db.diskdb, db.freezer, id) + if err != nil { + log.Crit("Failed to truncate extra state histories", "err", err) + } + if pruned != 0 { + log.Warn("Truncated extra state histories", "number", pruned) + } + return nil +} + // Update adds a new layer into the tree, if that can be linked to an existing // old parent. It is disallowed to insert a disk layer (the origin of all). Apart // from that this function will flatten the extra diff layers at bottom into disk @@ -292,8 +307,10 @@ func (db *Database) Enable(root common.Hash) error { } // Ensure the provided state root matches the stored one. root = types.TrieRootHash(root) - _, stored := rawdb.ReadAccountTrieNode(db.diskdb, nil) - stored = types.TrieRootHash(stored) + stored := types.EmptyRootHash + if blob := rawdb.ReadAccountTrieNode(db.diskdb, nil); len(blob) > 0 { + stored = crypto.Keccak256Hash(blob) + } if stored != root { return fmt.Errorf("state root mismatch: stored %x, synced %x", stored, root) } @@ -466,11 +483,6 @@ func (db *Database) SetBufferSize(size int) error { return db.tree.bottom().setBufferSize(db.bufferSize) } -// Scheme returns the node scheme used in the database. -func (db *Database) Scheme() string { - return rawdb.PathScheme -} - // modifyAllowed returns the indicator if mutation is allowed. This function // assumes the db.lock is already held. func (db *Database) modifyAllowed() error { diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go index 29de534589..7b24082315 100644 --- a/triedb/pathdb/database_test.go +++ b/triedb/pathdb/database_test.go @@ -474,7 +474,7 @@ func TestDisable(t *testing.T) { tester := newTester(t, 0) defer tester.release() - _, stored := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil) + stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)) if err := tester.db.Disable(); err != nil { t.Fatalf("Failed to deactivate database: %v", err) } @@ -580,7 +580,7 @@ func TestCorruptedJournal(t *testing.T) { t.Errorf("Failed to journal, err: %v", err) } tester.db.Close() - _, root := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil) + root := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)) // Mutate the journal in disk, it should be regarded as invalid blob := rawdb.ReadTrieJournal(tester.db.diskdb) diff --git a/triedb/pathdb/difflayer_test.go b/triedb/pathdb/difflayer_test.go index bf4c6502ef..1e93a3f892 100644 --- a/triedb/pathdb/difflayer_test.go +++ b/triedb/pathdb/difflayer_test.go @@ -70,10 +70,10 @@ func benchmarkSearch(b *testing.B, depth int, total int) { blob = testrand.Bytes(100) node = trienode.New(crypto.Keccak256Hash(blob), blob) ) - nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + nodes[common.Hash{}][string(path)] = node if npath == nil && depth == index { npath = common.CopyBytes(path) - nblob = common.CopyBytes(node.Blob) + nblob = common.CopyBytes(blob) } } return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) @@ -116,7 +116,7 @@ func BenchmarkPersist(b *testing.B) { blob = testrand.Bytes(100) node = trienode.New(crypto.Keccak256Hash(blob), blob) ) - nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + nodes[common.Hash{}][string(path)] = node } return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) } @@ -154,7 +154,7 @@ func BenchmarkJournal(b *testing.B) { blob = testrand.Bytes(100) node = trienode.New(crypto.Keccak256Hash(blob), blob) ) - nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + 
nodes[common.Hash{}][string(path)] = node } // TODO(rjl493456442) a non-nil state set is expected. return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go index ec7c91bcac..964ad2ef77 100644 --- a/triedb/pathdb/disklayer.go +++ b/triedb/pathdb/disklayer.go @@ -27,7 +27,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" - "golang.org/x/crypto/sha3" ) // diskLayer is a low level persistent layer built on top of a key-value store. @@ -117,12 +116,12 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co dirtyMissMeter.Mark(1) // Try to retrieve the trie node from the clean memory cache + h := newHasher() + defer h.release() + key := cacheKey(owner, path) if dl.cleans != nil { if blob := dl.cleans.Get(nil, key); len(blob) > 0 { - h := newHasher() - defer h.release() - cleanHitMeter.Mark(1) cleanReadMeter.Mark(int64(len(blob))) return blob, h.hash(blob), &nodeLoc{loc: locCleanCache, depth: depth}, nil @@ -130,20 +129,18 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co cleanMissMeter.Mark(1) } // Try to retrieve the trie node from the disk. - var ( - nBlob []byte - nHash common.Hash - ) + var blob []byte if owner == (common.Hash{}) { - nBlob, nHash = rawdb.ReadAccountTrieNode(dl.db.diskdb, path) + blob = rawdb.ReadAccountTrieNode(dl.db.diskdb, path) } else { - nBlob, nHash = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path) + blob = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path) } - if dl.cleans != nil && len(nBlob) > 0 { - dl.cleans.Set(key, nBlob) - cleanWriteMeter.Mark(int64(len(nBlob))) + if dl.cleans != nil && len(blob) > 0 { + dl.cleans.Set(key, blob) + cleanWriteMeter.Mark(int64(len(blob))) } - return nBlob, nHash, &nodeLoc{loc: locDiskLayer, depth: depth}, nil + + return blob, h.hash(blob), &nodeLoc{loc: locDiskLayer, depth: depth}, nil } // update implements the layer interface, returning a new diff layer on top @@ -303,7 +300,7 @@ func (dl *diskLayer) resetCache() { type hasher struct{ sha crypto.KeccakState } var hasherPool = sync.Pool{ - New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, + New: func() interface{} { return &hasher{sha: crypto.NewKeccakState()} }, } func newHasher() *hasher { diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go index 7099b2b381..3663cbbdb9 100644 --- a/triedb/pathdb/history.go +++ b/triedb/pathdb/history.go @@ -472,8 +472,8 @@ func (h *history) decode(accountData, storageData, accountIndexes, storageIndexe } // readHistory reads and decodes the state history object by the given id. 
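// Note (review): the read-side helpers below now accept the ethdb.AncientReader
// interface instead of the concrete *rawdb.ResettableFreezer, so any ancient
// store implementation can back them; only the reset/truncation paths need the
// wider ethdb.AncientStore.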
-func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) { - blob := rawdb.ReadStateHistoryMeta(freezer, id) +func readHistory(reader ethdb.AncientReader, id uint64) (*history, error) { + blob := rawdb.ReadStateHistoryMeta(reader, id) if len(blob) == 0 { return nil, fmt.Errorf("state history not found %d", id) } @@ -483,10 +483,10 @@ func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) } var ( dec = history{meta: &m} - accountData = rawdb.ReadStateAccountHistory(freezer, id) - storageData = rawdb.ReadStateStorageHistory(freezer, id) - accountIndexes = rawdb.ReadStateAccountIndex(freezer, id) - storageIndexes = rawdb.ReadStateStorageIndex(freezer, id) + accountData = rawdb.ReadStateAccountHistory(reader, id) + storageData = rawdb.ReadStateStorageHistory(reader, id) + accountIndexes = rawdb.ReadStateAccountIndex(reader, id) + storageIndexes = rawdb.ReadStateStorageIndex(reader, id) ) if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil { return nil, err @@ -495,7 +495,7 @@ func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) } // writeHistory persists the state history with the provided state set. -func writeHistory(freezer *rawdb.ResettableFreezer, dl *diffLayer) error { +func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error { // Short circuit if state set is not available. if dl.states == nil { return errors.New("state change set is not available") @@ -509,7 +509,7 @@ func writeHistory(freezer *rawdb.ResettableFreezer, dl *diffLayer) error { indexSize := common.StorageSize(len(accountIndex) + len(storageIndex)) // Write history data into five freezer table respectively. - rawdb.WriteStateHistory(freezer, dl.stateID(), history.meta.encode(), accountIndex, storageIndex, accountData, storageData) + rawdb.WriteStateHistory(writer, dl.stateID(), history.meta.encode(), accountIndex, storageIndex, accountData, storageData) historyDataBytesMeter.Mark(int64(dataSize)) historyIndexBytesMeter.Mark(int64(indexSize)) @@ -521,13 +521,13 @@ func writeHistory(freezer *rawdb.ResettableFreezer, dl *diffLayer) error { // checkHistories retrieves a batch of meta objects with the specified range // and performs the callback on each item. -func checkHistories(freezer *rawdb.ResettableFreezer, start, count uint64, check func(*meta) error) error { +func checkHistories(reader ethdb.AncientReader, start, count uint64, check func(*meta) error) error { for count > 0 { number := count if number > 10000 { number = 10000 // split the big read into small chunks } - blobs, err := rawdb.ReadStateHistoryMetaList(freezer, start, number) + blobs, err := rawdb.ReadStateHistoryMetaList(reader, start, number) if err != nil { return err } @@ -548,12 +548,12 @@ func checkHistories(freezer *rawdb.ResettableFreezer, start, count uint64, check // truncateFromHead removes the extra state histories from the head with the given // parameters. It returns the number of items removed from the head. 
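// For example (illustrative): with histories spanning ids [90, 105] and
// nhead = 100, the meta entries for 101-105 are unindexed from the key-value
// store and the freezer head is truncated back to 100; an nhead at or above
// the current head is a no-op.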
-func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead uint64) (int, error) { - ohead, err := freezer.Ancients() +func truncateFromHead(db ethdb.Batcher, store ethdb.AncientStore, nhead uint64) (int, error) { + ohead, err := store.Ancients() if err != nil { return 0, err } - otail, err := freezer.Tail() + otail, err := store.Tail() if err != nil { return 0, err } @@ -566,7 +566,7 @@ func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead return 0, nil } // Load the meta objects in range [nhead+1, ohead] - blobs, err := rawdb.ReadStateHistoryMetaList(freezer, nhead+1, ohead-nhead) + blobs, err := rawdb.ReadStateHistoryMetaList(store, nhead+1, ohead-nhead) if err != nil { return 0, err } @@ -581,7 +581,7 @@ func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead if err := batch.Write(); err != nil { return 0, err } - ohead, err = freezer.TruncateHead(nhead) + ohead, err = store.TruncateHead(nhead) if err != nil { return 0, err } @@ -590,12 +590,12 @@ func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead // truncateFromTail removes the extra state histories from the tail with the given // parameters. It returns the number of items removed from the tail. -func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail uint64) (int, error) { - ohead, err := freezer.Ancients() +func truncateFromTail(db ethdb.Batcher, store ethdb.AncientStore, ntail uint64) (int, error) { + ohead, err := store.Ancients() if err != nil { return 0, err } - otail, err := freezer.Tail() + otail, err := store.Tail() if err != nil { return 0, err } @@ -608,7 +608,7 @@ func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail return 0, nil } // Load the meta objects in range [otail+1, ntail] - blobs, err := rawdb.ReadStateHistoryMetaList(freezer, otail+1, ntail-otail) + blobs, err := rawdb.ReadStateHistoryMetaList(store, otail+1, ntail-otail) if err != nil { return 0, err } @@ -623,7 +623,7 @@ func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail if err := batch.Write(); err != nil { return 0, err } - otail, err = freezer.TruncateTail(ntail) + otail, err = store.TruncateTail(ntail) if err != nil { return 0, err } diff --git a/triedb/pathdb/history_inspect.go b/triedb/pathdb/history_inspect.go index d8a761b916..240474da37 100644 --- a/triedb/pathdb/history_inspect.go +++ b/triedb/pathdb/history_inspect.go @@ -21,7 +21,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -34,7 +34,7 @@ type HistoryStats struct { } // sanitizeRange limits the given range to fit within the local history store. -func sanitizeRange(start, end uint64, freezer *rawdb.ResettableFreezer) (uint64, uint64, error) { +func sanitizeRange(start, end uint64, freezer ethdb.AncientReader) (uint64, uint64, error) { // Load the id of the first history object in local store. 
tail, err := freezer.Tail() if err != nil { @@ -60,7 +60,7 @@ func sanitizeRange(start, end uint64, freezer *rawdb.ResettableFreezer) (uint64, return first, last, nil } -func inspectHistory(freezer *rawdb.ResettableFreezer, start, end uint64, onHistory func(*history, *HistoryStats)) (*HistoryStats, error) { +func inspectHistory(freezer ethdb.AncientReader, start, end uint64, onHistory func(*history, *HistoryStats)) (*HistoryStats, error) { var ( stats = &HistoryStats{} init = time.Now() @@ -96,7 +96,7 @@ func inspectHistory(freezer *rawdb.ResettableFreezer, start, end uint64, onHisto } // accountHistory inspects the account history within the range. -func accountHistory(freezer *rawdb.ResettableFreezer, address common.Address, start, end uint64) (*HistoryStats, error) { +func accountHistory(freezer ethdb.AncientReader, address common.Address, start, end uint64) (*HistoryStats, error) { return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) { blob, exists := h.accounts[address] if !exists { @@ -108,7 +108,7 @@ func accountHistory(freezer *rawdb.ResettableFreezer, address common.Address, st } // storageHistory inspects the storage history within the range. -func storageHistory(freezer *rawdb.ResettableFreezer, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) { +func storageHistory(freezer ethdb.AncientReader, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) { return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) { slots, exists := h.storages[address] if !exists { @@ -124,7 +124,7 @@ func storageHistory(freezer *rawdb.ResettableFreezer, address common.Address, sl } // historyRange returns the block number range of local state histories. -func historyRange(freezer *rawdb.ResettableFreezer) (uint64, uint64, error) { +func historyRange(freezer ethdb.AncientReader) (uint64, uint64, error) { // Load the id of the first history object in local store. 
tail, err := freezer.Tail() if err != nil { diff --git a/triedb/pathdb/history_test.go b/triedb/pathdb/history_test.go index 81ac768acd..4114aa1185 100644 --- a/triedb/pathdb/history_test.go +++ b/triedb/pathdb/history_test.go @@ -102,7 +102,7 @@ func TestEncodeDecodeHistory(t *testing.T) { } } -func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.ResettableFreezer, id uint64, root common.Hash, exist bool) { +func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientReader, id uint64, root common.Hash, exist bool) { blob := rawdb.ReadStateHistoryMeta(freezer, id) if exist && len(blob) == 0 { t.Fatalf("Failed to load trie history, %d", id) @@ -118,7 +118,7 @@ func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.Resettab } } -func checkHistoriesInRange(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.ResettableFreezer, from, to uint64, roots []common.Hash, exist bool) { +func checkHistoriesInRange(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientReader, from, to uint64, roots []common.Hash, exist bool) { for i, j := from, 0; i <= to; i, j = i+1, j+1 { checkHistory(t, db, freezer, i, roots[j], exist) } @@ -129,7 +129,7 @@ func TestTruncateHeadHistory(t *testing.T) { roots []common.Hash hs = makeHistories(10) db = rawdb.NewMemoryDatabase() - freezer, _ = openFreezer(t.TempDir(), false) + freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false) ) defer freezer.Close() @@ -157,7 +157,7 @@ func TestTruncateTailHistory(t *testing.T) { roots []common.Hash hs = makeHistories(10) db = rawdb.NewMemoryDatabase() - freezer, _ = openFreezer(t.TempDir(), false) + freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false) ) defer freezer.Close() @@ -200,7 +200,7 @@ func TestTruncateTailHistories(t *testing.T) { roots []common.Hash hs = makeHistories(10) db = rawdb.NewMemoryDatabase() - freezer, _ = openFreezer(t.TempDir()+fmt.Sprintf("%d", i), false) + freezer, _ = rawdb.NewStateFreezer(t.TempDir()+fmt.Sprintf("%d", i), false) ) defer freezer.Close() @@ -228,7 +228,7 @@ func TestTruncateOutOfRange(t *testing.T) { var ( hs = makeHistories(10) db = rawdb.NewMemoryDatabase() - freezer, _ = openFreezer(t.TempDir(), false) + freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false) ) defer freezer.Close() @@ -268,11 +268,6 @@ func TestTruncateOutOfRange(t *testing.T) { } } -// openFreezer initializes the freezer instance for storing state histories. -func openFreezer(datadir string, readOnly bool) (*rawdb.ResettableFreezer, error) { - return rawdb.NewStateFreezer(datadir, readOnly) -} - func compareSet[k comparable](a, b map[k][]byte) bool { if len(a) != len(b) { return false diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go index 3a0b7ebae2..1740ec5935 100644 --- a/triedb/pathdb/journal.go +++ b/triedb/pathdb/journal.go @@ -120,9 +120,10 @@ func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) { // loadLayers loads a pre-existing state layer backed by a key-value store. func (db *Database) loadLayers() layer { // Retrieve the root node of persistent state. 
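	// (Note, review: the root is now recomputed by hashing the stored blob
	// with keccak256; a missing blob resolves to types.EmptyRootHash rather
	// than relying on the hash that ReadAccountTrieNode used to return.)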
- _, root := rawdb.ReadAccountTrieNode(db.diskdb, nil) - root = types.TrieRootHash(root) - + var root = types.EmptyRootHash + if blob := rawdb.ReadAccountTrieNode(db.diskdb, nil); len(blob) > 0 { + root = crypto.Keccak256Hash(blob) + } // Load the layers by resolving the journal head, err := db.loadJournal(root) if err == nil { @@ -361,14 +362,13 @@ func (db *Database) Journal(root common.Hash) error { if err := rlp.Encode(journal, journalVersion); err != nil { return err } - // The stored state in disk might be empty, convert the - // root to emptyRoot in this case. - _, diskroot := rawdb.ReadAccountTrieNode(db.diskdb, nil) - diskroot = types.TrieRootHash(diskroot) - // Secondly write out the state root in disk, ensure all layers // on top are continuous with disk. - if err := rlp.Encode(journal, diskroot); err != nil { + diskRoot := types.EmptyRootHash + if blob := rawdb.ReadAccountTrieNode(db.diskdb, nil); len(blob) > 0 { + diskRoot = crypto.Keccak256Hash(blob) + } + if err := rlp.Encode(journal, diskRoot); err != nil { return err } // Finally write out the journal of each layer in reverse order. diff --git a/triedb/pathdb/nodebuffer.go b/triedb/pathdb/nodebuffer.go index 4a13fcc44e..ff09484100 100644 --- a/triedb/pathdb/nodebuffer.go +++ b/triedb/pathdb/nodebuffer.go @@ -17,6 +17,7 @@ package pathdb import ( + "bytes" "fmt" "time" @@ -89,7 +90,7 @@ func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *no // The nodes belong to original diff layer are still accessible even // after merging, thus the ownership of nodes map should still belong // to original layer and any mutation on it should be prevented. - current = make(map[string]*trienode.Node) + current = make(map[string]*trienode.Node, len(subset)) for path, n := range subset { current[path] = n delta += int64(len(n.Blob) + len(path)) @@ -148,14 +149,14 @@ func (b *nodebuffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[s // // In case of database rollback, don't panic if this "clean" // node occurs which is not present in buffer. - var nhash common.Hash + var blob []byte if owner == (common.Hash{}) { - _, nhash = rawdb.ReadAccountTrieNode(db, []byte(path)) + blob = rawdb.ReadAccountTrieNode(db, []byte(path)) } else { - _, nhash = rawdb.ReadStorageTrieNode(db, owner, []byte(path)) + blob = rawdb.ReadStorageTrieNode(db, owner, []byte(path)) } // Ignore the clean node in the case described above. - if nhash == n.Hash { + if bytes.Equal(blob, n.Blob) { continue } panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
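A closing note on the nodebuffer hunk above: a trie node's hash is defined as the keccak256 of its RLP-encoded blob, so comparing raw blobs with bytes.Equal is equivalent to the old stored-hash comparison while avoiding a hash computation per reverted node. A small sketch of the invariant, with a and b standing for any two encoded nodes (illustration only):

	// bytes.Equal(a, b) always implies equal keccak256 hashes, and equal
	// hashes imply equal blobs barring a collision, so the byte comparison
	// can stand in for the hash check wholesale.
	if bytes.Equal(a, b) != (crypto.Keccak256Hash(a) == crypto.Keccak256Hash(b)) {
		panic("keccak256 collision") // astronomically unlikely
	}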