diff --git a/.golangci.yml b/.golangci.yml
index adb59f318f21..e355e6f9d12e 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -21,10 +21,13 @@ linters:
- staticcheck
- bidichk
- durationcheck
- - exportloopref
+ - copyloopvar
- whitespace
- revive # only certain checks enabled
-
+ - gocheckcompilerdirectives
+ - reassign
+ - mirror
+ - tenv
### linters we tried and will not be using:
###
# - structcheck # lots of false positives
diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go
index dfcd0593937c..fc290cfe8415 100644
--- a/accounts/abi/abi_test.go
+++ b/accounts/abi/abi_test.go
@@ -1199,7 +1199,6 @@ func TestUnpackRevert(t *testing.T) {
{"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil},
}
for index, c := range cases {
- index, c := index, c
t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
t.Parallel()
got, err := UnpackRevert(common.Hex2Bytes(c.input))
diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go
index e902345f090a..71357c7a8c70 100644
--- a/accounts/abi/bind/bind.go
+++ b/accounts/abi/bind/bind.go
@@ -252,7 +252,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
}
// Parse library references.
for pattern, name := range libs {
- matched, err := regexp.Match("__\\$"+pattern+"\\$__", []byte(contracts[types[i]].InputBin))
+ matched, err := regexp.MatchString("__\\$"+pattern+"\\$__", contracts[types[i]].InputBin)
if err != nil {
log.Error("Could not search for pattern", "pattern", pattern, "contract", contracts[types[i]], "err", err)
}
diff --git a/accounts/abi/event_test.go b/accounts/abi/event_test.go
index fffe28ea63a4..c548fd8db648 100644
--- a/accounts/abi/event_test.go
+++ b/accounts/abi/event_test.go
@@ -331,7 +331,6 @@ func TestEventTupleUnpack(t *testing.T) {
for _, tc := range testCases {
assert := assert.New(t)
- tc := tc
t.Run(tc.name, func(t *testing.T) {
err := unpackTestEventData(tc.dest, tc.data, tc.jsonLog, assert)
if tc.error == "" {
diff --git a/accounts/abi/pack_test.go b/accounts/abi/pack_test.go
index 00bdae469e21..cda31b6204d7 100644
--- a/accounts/abi/pack_test.go
+++ b/accounts/abi/pack_test.go
@@ -34,7 +34,6 @@ import (
func TestPack(t *testing.T) {
t.Parallel()
for i, test := range packUnpackTests {
- i, test := i, test
t.Run(strconv.Itoa(i), func(t *testing.T) {
t.Parallel()
encb, err := hex.DecodeString(test.packed)
diff --git a/accounts/abi/reflect_test.go b/accounts/abi/reflect_test.go
index 6c7ae57087db..577fa6ca7152 100644
--- a/accounts/abi/reflect_test.go
+++ b/accounts/abi/reflect_test.go
@@ -172,7 +172,6 @@ var reflectTests = []reflectTest{
func TestReflectNameToStruct(t *testing.T) {
t.Parallel()
for _, test := range reflectTests {
- test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))
diff --git a/accounts/abi/topics_test.go b/accounts/abi/topics_test.go
index 9e1efd382160..6a4c50078af5 100644
--- a/accounts/abi/topics_test.go
+++ b/accounts/abi/topics_test.go
@@ -137,7 +137,6 @@ func TestMakeTopics(t *testing.T) {
},
}
for _, tt := range tests {
- tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := MakeTopics(tt.args.query...)
@@ -373,7 +372,6 @@ func TestParseTopics(t *testing.T) {
tests := setupTopicsTests()
for _, tt := range tests {
- tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
createObj := tt.args.createObj()
@@ -393,7 +391,6 @@ func TestParseTopicsIntoMap(t *testing.T) {
tests := setupTopicsTests()
for _, tt := range tests {
- tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
outMap := make(map[string]interface{})
diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go
index 29891ec0a411..7df7b9c40339 100644
--- a/accounts/abi/unpack_test.go
+++ b/accounts/abi/unpack_test.go
@@ -389,7 +389,6 @@ func TestMethodMultiReturn(t *testing.T) {
"Can not unpack into a slice with wrong types",
}}
for _, tc := range testCases {
- tc := tc
t.Run(tc.name, func(t *testing.T) {
require := require.New(t)
err := abi.UnpackIntoInterface(tc.dest, "multi", data)
@@ -947,7 +946,7 @@ func TestOOMMaliciousInput(t *testing.T) {
}
encb, err := hex.DecodeString(test.enc)
if err != nil {
- t.Fatalf("invalid hex: %s" + test.enc)
+ t.Fatalf("invalid hex: %s", test.enc)
}
_, err = abi.Methods["method"].Outputs.UnpackValues(encb)
if err == nil {
diff --git a/beacon/blsync/engineclient.go b/beacon/blsync/engineclient.go
index 97ef6f5cb88e..fb8f77f32b07 100644
--- a/beacon/blsync/engineclient.go
+++ b/beacon/blsync/engineclient.go
@@ -92,7 +92,7 @@ func (ec *engineClient) updateLoop(headCh <-chan types.ChainHeadEvent) {
}
func (ec *engineClient) callNewPayload(fork string, event types.ChainHeadEvent) (string, error) {
- execData := engine.BlockToExecutableData(event.Block, nil, nil).ExecutionPayload
+ execData := engine.BlockToExecutableData(event.Block, nil, nil, nil).ExecutionPayload
var (
method string
diff --git a/beacon/engine/gen_epe.go b/beacon/engine/gen_epe.go
index 039884e842fd..deada06166c5 100644
--- a/beacon/engine/gen_epe.go
+++ b/beacon/engine/gen_epe.go
@@ -20,7 +20,7 @@ func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) {
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Requests []hexutil.Bytes `json:"executionRequests"`
Override bool `json:"shouldOverrideBuilder"`
- Witness *hexutil.Bytes `json:"witness"`
+ Witness *hexutil.Bytes `json:"witness,omitempty"`
}
var enc ExecutionPayloadEnvelope
enc.ExecutionPayload = e.ExecutionPayload
@@ -45,7 +45,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Requests []hexutil.Bytes `json:"executionRequests"`
Override *bool `json:"shouldOverrideBuilder"`
- Witness *hexutil.Bytes `json:"witness"`
+ Witness *hexutil.Bytes `json:"witness,omitempty"`
}
var dec ExecutionPayloadEnvelope
if err := json.Unmarshal(input, &dec); err != nil {
diff --git a/beacon/engine/types.go b/beacon/engine/types.go
index 31f5e6fc2a62..34365ecfa8df 100644
--- a/beacon/engine/types.go
+++ b/beacon/engine/types.go
@@ -109,7 +109,7 @@ type ExecutionPayloadEnvelope struct {
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Requests [][]byte `json:"executionRequests"`
Override bool `json:"shouldOverrideBuilder"`
- Witness *hexutil.Bytes `json:"witness"`
+ Witness *hexutil.Bytes `json:"witness,omitempty"`
}
type BlobsBundleV1 struct {
@@ -260,7 +260,15 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
var requestsHash *common.Hash
if requests != nil {
- h := types.CalcRequestsHash(requests)
+ // Put back request type byte.
+ typedRequests := make([][]byte, len(requests))
+ for i, reqdata := range requests {
+ typedReqdata := make([]byte, len(reqdata)+1)
+ typedReqdata[0] = byte(i)
+ copy(typedReqdata[1:], reqdata)
+ typedRequests[i] = typedReqdata
+ }
+ h := types.CalcRequestsHash(typedRequests)
requestsHash = &h
}
@@ -294,7 +302,7 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
// BlockToExecutableData constructs the ExecutableData structure by filling the
// fields from the given block. It assumes the given block is post-merge block.
-func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar) *ExecutionPayloadEnvelope {
+func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar, requests [][]byte) *ExecutionPayloadEnvelope {
data := &ExecutableData{
BlockHash: block.Hash(),
ParentHash: block.ParentHash(),
@@ -315,6 +323,8 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
ExcessBlobGas: block.ExcessBlobGas(),
ExecutionWitness: block.ExecutionWitness(),
}
+
+ // Add blobs.
bundle := BlobsBundleV1{
Commitments: make([]hexutil.Bytes, 0),
Blobs: make([]hexutil.Bytes, 0),
@@ -327,7 +337,23 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(sidecar.Proofs[j][:]))
}
}
- return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees, BlobsBundle: &bundle, Override: false}
+
+ // Remove type byte in requests.
+ var plainRequests [][]byte
+ if requests != nil {
+ plainRequests = make([][]byte, len(requests))
+ for i, reqdata := range requests {
+ plainRequests[i] = reqdata[1:]
+ }
+ }
+
+ return &ExecutionPayloadEnvelope{
+ ExecutionPayload: data,
+ BlockValue: fees,
+ BlobsBundle: &bundle,
+ Requests: plainRequests,
+ Override: false,
+ }
}
// ExecutionPayloadBody is used in the response to GetPayloadBodiesByHash and GetPayloadBodiesByRange
diff --git a/build/checksums.txt b/build/checksums.txt
index e7fc5bdc7911..3da5d00deeb1 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -56,37 +56,37 @@ f45af3e1434175ff85620a74c07fb41d6844655f1f2cd2389c5fca6de000f58c go1.23.2.freeb
f626cdd92fc21a88b31c1251f419c17782933a42903db87a174ce74eeecc66a9 go1.23.2.linux-arm64.tar.gz
fa70d39ddeb6b55241a30b48d7af4e681c6a7d7104e8326c3bc1b12a75e091cc go1.23.2.solaris-amd64.tar.gz
-# version:golangci 1.59.0
+# version:golangci 1.61.0
# https://github.com/golangci/golangci-lint/releases/
-# https://github.com/golangci/golangci-lint/releases/download/v1.59.0/
-418acf7e255ddc0783e97129c9b03d9311b77826a5311d425a01c708a86417e7 golangci-lint-1.59.0-darwin-amd64.tar.gz
-5f6a1d95a6dd69f6e328eb56dd311a38e04cfab79a1305fbf4957f4e203f47b6 golangci-lint-1.59.0-darwin-arm64.tar.gz
-8899bf589185d49f747f3e5db9f0bde8a47245a100c64a3dd4d65e8e92cfc4f2 golangci-lint-1.59.0-freebsd-386.tar.gz
-658212f138d9df2ac89427e22115af34bf387c0871d70f2a25101718946a014f golangci-lint-1.59.0-freebsd-amd64.tar.gz
-4c6395ea40f314d3b6fa17d8997baab93464d5d1deeaab513155e625473bd03a golangci-lint-1.59.0-freebsd-armv6.tar.gz
-ff37da4fbaacdb6bbae70fdbdbb1ba932a859956f788c82822fa06bef5b7c6b3 golangci-lint-1.59.0-freebsd-armv7.tar.gz
-439739469ed2bda182b1ec276d40c40e02f195537f78e3672996741ad223d6b6 golangci-lint-1.59.0-illumos-amd64.tar.gz
-940801d46790e40d0a097d8fee34e2606f0ef148cd039654029b0b8750a15ed6 golangci-lint-1.59.0-linux-386.tar.gz
-3b14a439f33c4fff83dbe0349950d984042b9a1feb6c62f82787b598fc3ab5f4 golangci-lint-1.59.0-linux-amd64.tar.gz
-c57e6c0b0fa03089a2611dceddd5bc5d206716cccdff8b149da8baac598719a1 golangci-lint-1.59.0-linux-arm64.tar.gz
-93149e2d3b25ac754df9a23172403d8aa6d021a7e0d9c090a12f51897f68c9a0 golangci-lint-1.59.0-linux-armv6.tar.gz
-d10ac38239d9efee3ee87b55c96cdf3fa09e1a525babe3ffdaaf65ccc48cf3dc golangci-lint-1.59.0-linux-armv7.tar.gz
-047338114b4f0d5f08f0fb9a397b03cc171916ed0960be7dfb355c2320cd5e9c golangci-lint-1.59.0-linux-loong64.tar.gz
-5632df0f7f8fc03a80a266130faef0b5902d280cf60621f1b2bdc1aef6d97ee9 golangci-lint-1.59.0-linux-mips64.tar.gz
-71dd638c82fa4439171e7126d2c7a32b5d103bfdef282cea40c83632cb3d1f4b golangci-lint-1.59.0-linux-mips64le.tar.gz
-6cf9ea0d34e91669948483f9ae7f07da319a879344373a1981099fbd890cde00 golangci-lint-1.59.0-linux-ppc64le.tar.gz
-af0205fa6fbab197cee613c359947711231739095d21b5c837086233b36ad971 golangci-lint-1.59.0-linux-riscv64.tar.gz
-a9d2fb93f3c688ebccef94f5dc96c0b07c4d20bf6556cddebd8442159b0c80f6 golangci-lint-1.59.0-linux-s390x.tar.gz
-68ab4c57a847b8ace9679887f2f8b2b6760e57ee29dcde8c3f40dd8bb2654fa2 golangci-lint-1.59.0-netbsd-386.tar.gz
-d277b8b435c19406d00de4d509eadf5a024a5782878332e9a1b7c02bb76e87a7 golangci-lint-1.59.0-netbsd-amd64.tar.gz
-83211656be8dcfa1545af4f92894409f412d1f37566798cb9460a526593ad62c golangci-lint-1.59.0-netbsd-arm64.tar.gz
-6c6866d28bf79fa9817a0f7d2b050890ed109cae80bdb4dfa39536a7226da237 golangci-lint-1.59.0-netbsd-armv6.tar.gz
-11587566363bd03ca586b7df9776ccaed569fcd1f3489930ac02f9375b307503 golangci-lint-1.59.0-netbsd-armv7.tar.gz
-466181a8967bafa495e41494f93a0bec829c2cf715de874583b0460b3b8ae2b8 golangci-lint-1.59.0-windows-386.zip
-3317d8a87a99a49a0a1321d295c010790e6dbf43ee96b318f4b8bb23eae7a565 golangci-lint-1.59.0-windows-amd64.zip
-b3af955c7fceac8220a36fc799e1b3f19d3b247d32f422caac5f9845df8f7316 golangci-lint-1.59.0-windows-arm64.zip
-6f083c7d0c764e5a0e5bde46ee3e91ae357d80c194190fe1d9754392e9064c7e golangci-lint-1.59.0-windows-armv6.zip
-3709b4dd425deadab27748778d08e03c0f804d7748f7dd5b6bb488d98aa031c7 golangci-lint-1.59.0-windows-armv7.zip
+# https://github.com/golangci/golangci-lint/releases/download/v1.61.0/
+5c280ef3284f80c54fd90d73dc39ca276953949da1db03eb9dd0fbf868cc6e55 golangci-lint-1.61.0-darwin-amd64.tar.gz
+544334890701e4e04a6e574bc010bea8945205c08c44cced73745a6378012d36 golangci-lint-1.61.0-darwin-arm64.tar.gz
+e885a6f561092055930ebd298914d80e8fd2e10d2b1e9942836c2c6a115301fa golangci-lint-1.61.0-freebsd-386.tar.gz
+b13f6a3f11f65e7ff66b734d7554df3bbae0f485768848424e7554ed289e19c2 golangci-lint-1.61.0-freebsd-amd64.tar.gz
+cd8e7bbe5b8f33ed1597aa1cc588da96a3b9f22e1b9ae60d93511eae1a0ee8c5 golangci-lint-1.61.0-freebsd-armv6.tar.gz
+7ade524dbd88bd250968f45e190af90e151fa5ee63dd6aa7f7bb90e8155db61d golangci-lint-1.61.0-freebsd-armv7.tar.gz
+0fe3cd8a1ed8d9f54f48670a5af3df056d6040d94017057f0f4d65c930660ad9 golangci-lint-1.61.0-illumos-amd64.tar.gz
+b463fc5053a612abd26393ebaff1d85d7d56058946f4f0f7bf25ed44ea899415 golangci-lint-1.61.0-linux-386.tar.gz
+77cb0af99379d9a21d5dc8c38364d060e864a01bd2f3e30b5e8cc550c3a54111 golangci-lint-1.61.0-linux-amd64.tar.gz
+af60ac05566d9351615cb31b4cc070185c25bf8cbd9b09c1873aa5ec6f3cc17e golangci-lint-1.61.0-linux-arm64.tar.gz
+1f307f2fcc5d7d674062a967a0d83a7091e300529aa237ec6ad2b3dd14c897f5 golangci-lint-1.61.0-linux-armv6.tar.gz
+3ad8cbaae75a547450844811300f99c4cd290277398e43d22b9eb1792d15af4c golangci-lint-1.61.0-linux-armv7.tar.gz
+9be2ca67d961d7699079739cf6f7c8291c5183d57e34d1677de21ca19d0bd3ed golangci-lint-1.61.0-linux-loong64.tar.gz
+90d005e1648115ebf0861b408eab9c936079a24763e883058b0a227cd3135d31 golangci-lint-1.61.0-linux-mips64.tar.gz
+6d2ed4f49407115460b8c10ccfc40fd177e0887a48864a2879dd16e84ba2a48c golangci-lint-1.61.0-linux-mips64le.tar.gz
+633089589af5a58b7430afb6eee107d4e9c99e8d91711ddc219eb13a07e8d3b8 golangci-lint-1.61.0-linux-ppc64le.tar.gz
+4c1a097d9e0d1b4a8144dae6a1f5583a38d662f3bdc1498c4e954b6ed856be98 golangci-lint-1.61.0-linux-riscv64.tar.gz
+30581d3c987d287b7064617f1a2694143e10dffc40bc25be6636006ee82d7e1c golangci-lint-1.61.0-linux-s390x.tar.gz
+42530bf8100bd43c07f5efe6d92148ba6c5a7a712d510c6f24be85af6571d5eb golangci-lint-1.61.0-netbsd-386.tar.gz
+b8bb07c920f6601edf718d5e82ec0784fd590b0992b42b6ec18da99f26013ed4 golangci-lint-1.61.0-netbsd-amd64.tar.gz
+353a51527c60bd0776b0891b03f247c791986f625fca689d121972c624e54198 golangci-lint-1.61.0-netbsd-arm64.tar.gz
+957a6272c3137910514225704c5dac0723b9c65eb7d9587366a997736e2d7580 golangci-lint-1.61.0-netbsd-armv6.tar.gz
+a89eb28ff7f18f5cd52b914739360fa95cf2f643de4adeca46e26bec3a07e8d8 golangci-lint-1.61.0-netbsd-armv7.tar.gz
+d8d74c43600b271393000717a4ed157d7a15bb85bab7db2efad9b63a694d4634 golangci-lint-1.61.0-windows-386.zip
+e7bc2a81929a50f830244d6d2e657cce4f19a59aff49fa9000176ff34fda64ce golangci-lint-1.61.0-windows-amd64.zip
+ed97c221596dd771e3dd9344872c140340bee2e819cd7a90afa1de752f1f2e0f golangci-lint-1.61.0-windows-arm64.zip
+4b365233948b13d02d45928a5c390045e00945e919747b9887b5f260247541ae golangci-lint-1.61.0-windows-armv6.zip
+595538fb64d152173959d28f6235227f9cd969a828e5af0c4e960d02af4ffd0e golangci-lint-1.61.0-windows-armv7.zip
# This is the builder on PPA that will build Go itself (inception-y), don't modify!
#
diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go
index 2b503d62df93..2a70e0328fd4 100644
--- a/cmd/devp2p/internal/ethtest/chain.go
+++ b/cmd/devp2p/internal/ethtest/chain.go
@@ -100,7 +100,6 @@ func (c *Chain) AccountsInHashOrder() []state.DumpAccount {
list := make([]state.DumpAccount, len(c.state))
i := 0
for addr, acc := range c.state {
- addr := addr
list[i] = acc
list[i].Address = &addr
if len(acc.AddressHash) != 32 {
diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go
index 4f1b6f86562a..9c1efa0e8e22 100644
--- a/cmd/devp2p/internal/ethtest/snap.go
+++ b/cmd/devp2p/internal/ethtest/snap.go
@@ -286,7 +286,6 @@ a key before startingHash (wrong order). The server should return the first avai
}
for i, tc := range tests {
- tc := tc
if i > 0 {
t.Log("\n")
}
@@ -429,7 +428,6 @@ of the test account. The server should return slots [2,3] (i.e. the 'next availa
}
for i, tc := range tests {
- tc := tc
if i > 0 {
t.Log("\n")
}
@@ -526,7 +524,6 @@ func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
}
for i, tc := range tests {
- tc := tc
if i > 0 {
t.Log("\n")
}
@@ -723,7 +720,6 @@ The server should reject the request.`,
}
for i, tc := range tests {
- tc := tc
if i > 0 {
t.Log("\n")
}
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index 9f30d7ba6c32..f80dd02c67a7 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -132,7 +132,7 @@ type rejectedTx struct {
// Apply applies a set of transactions to a pre-state
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
txIt txIterator, miningReward int64,
- getTracerFn func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error)) (*state.StateDB, *ExecutionResult, []byte, error) {
+ getTracerFn func(txIndex int, txHash common.Hash, chainConfig *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error)) (*state.StateDB, *ExecutionResult, []byte, error) {
// Capture errors for BLOCKHASH operation, if we haven't been supplied the
// required blockhashes
var hashError error
@@ -242,7 +242,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
continue
}
}
- tracer, traceOutput, err := getTracerFn(txIndex, tx.Hash())
+ tracer, traceOutput, err := getTracerFn(txIndex, tx.Hash(), chainConfig)
if err != nil {
return nil, nil, nil, err
}
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index fa052f59549b..d8665d22d34b 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -82,7 +82,9 @@ type input struct {
}
func Transition(ctx *cli.Context) error {
- var getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) { return nil, nil, nil }
+ var getTracer = func(txIndex int, txHash common.Hash, chainConfig *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error) {
+ return nil, nil, nil
+ }
baseDir, err := createBasedir(ctx)
if err != nil {
@@ -97,7 +99,7 @@ func Transition(ctx *cli.Context) error {
EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name),
Debug: true,
}
- getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) {
+ getTracer = func(txIndex int, txHash common.Hash, _ *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error) {
traceFile, err := os.Create(filepath.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String())))
if err != nil {
return nil, nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
@@ -121,12 +123,12 @@ func Transition(ctx *cli.Context) error {
if ctx.IsSet(TraceTracerConfigFlag.Name) {
config = []byte(ctx.String(TraceTracerConfigFlag.Name))
}
- getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) {
+ getTracer = func(txIndex int, txHash common.Hash, chainConfig *params.ChainConfig) (*tracers.Tracer, io.WriteCloser, error) {
traceFile, err := os.Create(filepath.Join(baseDir, fmt.Sprintf("trace-%d-%v.json", txIndex, txHash.String())))
if err != nil {
return nil, nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
}
- tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name), nil, config)
+ tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name), nil, config, chainConfig)
if err != nil {
return nil, nil, NewError(ErrorConfig, fmt.Errorf("failed instantiating tracer: %w", err))
}
diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go
index 76ebc420ec6c..65723694f9ea 100644
--- a/cmd/evm/t8n_test.go
+++ b/cmd/evm/t8n_test.go
@@ -524,7 +524,7 @@ func TestT9n(t *testing.T) {
ok, err := cmpJson(have, want)
switch {
case err != nil:
- t.Logf(string(have))
+ t.Log(string(have))
t.Fatalf("test %d, json parsing failed: %v", i, err)
case !ok:
t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
@@ -659,7 +659,7 @@ func TestB11r(t *testing.T) {
ok, err := cmpJson(have, want)
switch {
case err != nil:
- t.Logf(string(have))
+ t.Log(string(have))
t.Fatalf("test %d, json parsing failed: %v", i, err)
case !ok:
t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
diff --git a/cmd/geth/accountcmd_test.go b/cmd/geth/accountcmd_test.go
index ea3a7c3b647c..8416eb40ef96 100644
--- a/cmd/geth/accountcmd_test.go
+++ b/cmd/geth/accountcmd_test.go
@@ -113,7 +113,6 @@ func TestAccountImport(t *testing.T) {
},
}
for _, test := range tests {
- test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
importAccountWithExpect(t, test.key, test.output)
diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go
index e2d31255596e..2a59f0052f94 100644
--- a/cmd/geth/consolecmd.go
+++ b/cmd/geth/consolecmd.go
@@ -152,7 +152,7 @@ func remoteConsole(ctx *cli.Context) error {
func ephemeralConsole(ctx *cli.Context) error {
var b strings.Builder
for _, file := range ctx.Args().Slice() {
- b.Write([]byte(fmt.Sprintf("loadScript('%s');", file)))
+ b.WriteString(fmt.Sprintf("loadScript('%s');", file))
}
utils.Fatalf(`The "js" command is deprecated. Please use the following instead:
geth --exec "%s" console`, b.String())
diff --git a/cmd/geth/version_check_test.go b/cmd/geth/version_check_test.go
index 3676d25d0022..34171cb035f8 100644
--- a/cmd/geth/version_check_test.go
+++ b/cmd/geth/version_check_test.go
@@ -170,7 +170,6 @@ func TestKeyID(t *testing.T) {
{"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"},
}
for _, tt := range tests {
- tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
if got := keyID(tt.args.id); got != tt.want {
diff --git a/cmd/rlpdump/main.go b/cmd/rlpdump/main.go
index 7e1d314d4924..685e5bb71a69 100644
--- a/cmd/rlpdump/main.go
+++ b/cmd/rlpdump/main.go
@@ -142,7 +142,7 @@ func dump(in *inStream, s *rlp.Stream, depth int, out io.Writer) error {
s.List()
defer s.ListEnd()
if size == 0 {
- fmt.Fprintf(out, ws(depth)+"[]")
+ fmt.Fprint(out, ws(depth)+"[]")
} else {
fmt.Fprintln(out, ws(depth)+"[")
for i := 0; ; i++ {
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 28be4f466ef0..b47572a08154 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -544,6 +544,7 @@ var (
VMTraceJsonConfigFlag = &cli.StringFlag{
Name: "vmtrace.jsonconfig",
Usage: "Tracer configuration (JSON)",
+ Value: "{}",
Category: flags.VMCategory,
}
// API options.
@@ -1980,13 +1981,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// VM tracing config.
if ctx.IsSet(VMTraceFlag.Name) {
if name := ctx.String(VMTraceFlag.Name); name != "" {
- var config string
- if ctx.IsSet(VMTraceJsonConfigFlag.Name) {
- config = ctx.String(VMTraceJsonConfigFlag.Name)
- }
-
cfg.VMTrace = name
- cfg.VMTraceJsonConfig = config
+ cfg.VMTraceJsonConfig = ctx.String(VMTraceJsonConfigFlag.Name)
}
}
}
@@ -2267,10 +2263,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
}
if ctx.IsSet(VMTraceFlag.Name) {
if name := ctx.String(VMTraceFlag.Name); name != "" {
- var config json.RawMessage
- if ctx.IsSet(VMTraceJsonConfigFlag.Name) {
- config = json.RawMessage(ctx.String(VMTraceJsonConfigFlag.Name))
- }
+ config := json.RawMessage(ctx.String(VMTraceJsonConfigFlag.Name))
t, err := tracers.LiveDirectory.New(name, config)
if err != nil {
Fatalf("Failed to create tracer %q: %v", name, err)
diff --git a/cmd/utils/flags_test.go b/cmd/utils/flags_test.go
index 00c73a5264c0..0be3370d4a81 100644
--- a/cmd/utils/flags_test.go
+++ b/cmd/utils/flags_test.go
@@ -56,7 +56,6 @@ func Test_SplitTagsFlag(t *testing.T) {
},
}
for _, tt := range tests {
- tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
if got := SplitTagsFlag(tt.args); !reflect.DeepEqual(got, tt.want) {
diff --git a/cmd/utils/prompt_test.go b/cmd/utils/prompt_test.go
index 889bf71de335..236353a7cc28 100644
--- a/cmd/utils/prompt_test.go
+++ b/cmd/utils/prompt_test.go
@@ -66,7 +66,6 @@ func TestGetPassPhraseWithList(t *testing.T) {
},
}
for _, tt := range tests {
- tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
if got := GetPassPhraseWithList(tt.args.text, tt.args.confirmation, tt.args.index, tt.args.passwords); got != tt.want {
diff --git a/core/block_validator.go b/core/block_validator.go
index 35695d34d85e..59783a040730 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -121,7 +121,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
// such as amount of used gas, the receipt roots and the state root itself.
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, res *ProcessResult, stateless bool) error {
if res == nil {
- return fmt.Errorf("nil ProcessResult value")
+ return errors.New("nil ProcessResult value")
}
header := block.Header()
if block.GasUsed() != res.GasUsed {
@@ -150,7 +150,7 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
return fmt.Errorf("invalid requests hash (remote: %x local: %x)", *header.RequestsHash, reqhash)
}
} else if res.Requests != nil {
- return fmt.Errorf("block has requests before prague fork")
+ return errors.New("block has requests before prague fork")
}
// Validate the state root against the received state root and throw
// an error if they don't match.
diff --git a/core/blockchain.go b/core/blockchain.go
index f7c921fe64fe..02c0bbaad1cb 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -23,6 +23,7 @@ import (
"io"
"math/big"
"runtime"
+ "slices"
"strings"
"sync"
"sync/atomic"
@@ -224,7 +225,6 @@ type BlockChain struct {
hc *HeaderChain
rmLogsFeed event.Feed
chainFeed event.Feed
- chainSideFeed event.Feed
chainHeadFeed event.Feed
logsFeed event.Feed
blockProcFeed event.Feed
@@ -571,15 +571,14 @@ func (bc *BlockChain) SetHead(head uint64) error {
}
// Send chain head event to update the transaction pool
header := bc.CurrentBlock()
- block := bc.GetBlock(header.Hash(), header.Number.Uint64())
- if block == nil {
+ if block := bc.GetBlock(header.Hash(), header.Number.Uint64()); block == nil {
// This should never happen. In practice, previously currentBlock
// contained the entire block whereas now only a "marker", so there
// is an ever so slight chance for a race we should handle.
log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
}
- bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
+ bc.chainHeadFeed.Send(ChainHeadEvent{Header: header})
return nil
}
@@ -593,15 +592,14 @@ func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
}
// Send chain head event to update the transaction pool
header := bc.CurrentBlock()
- block := bc.GetBlock(header.Hash(), header.Number.Uint64())
- if block == nil {
+ if block := bc.GetBlock(header.Hash(), header.Number.Uint64()); block == nil {
// This should never happen. In practice, previously currentBlock
// contained the entire block whereas now only a "marker", so there
// is an ever so slight chance for a race we should handle.
log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
}
- bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
+ bc.chainHeadFeed.Send(ChainHeadEvent{Header: header})
return nil
}
@@ -1438,7 +1436,7 @@ func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (e
func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
current := bc.CurrentBlock()
if block.ParentHash() != current.Hash() {
- if err := bc.reorg(current, block); err != nil {
+ if err := bc.reorg(current, block.Header()); err != nil {
return err
}
}
@@ -1544,7 +1542,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
// Reorganise the chain if the parent is not the head block
if block.ParentHash() != currentBlock.Hash() {
- if err := bc.reorg(currentBlock, block); err != nil {
+ if err := bc.reorg(currentBlock, block.Header()); err != nil {
return NonStatTy, err
}
}
@@ -1552,7 +1550,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
// Set new head.
bc.writeHeadBlock(block)
- bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
+ bc.chainFeed.Send(ChainEvent{Header: block.Header()})
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
@@ -1562,7 +1560,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
// we will fire an accumulated ChainHeadEvent and disable fire
// event here.
if emitHeadEvent {
- bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
+ bc.chainHeadFeed.Send(ChainHeadEvent{Header: block.Header()})
}
return CanonStatTy, nil
}
@@ -1627,7 +1625,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
// Fire a single chain head event if we've progressed the chain
defer func() {
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
- bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
+ bc.chainHeadFeed.Send(ChainHeadEvent{Header: lastCanon.Header()})
}
}()
// Start the parallel header verifier
@@ -2157,8 +2155,8 @@ func (bc *BlockChain) recoverAncestors(block *types.Block, makeWitness bool) (co
return block.Hash(), nil
}
-// collectLogs collects the logs that were generated or removed during
-// the processing of a block. These logs are later announced as deleted or reborn.
+// collectLogs collects the logs that were generated or removed during the
+// processing of a block. These logs are later announced as deleted or reborn.
func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
var blobGasPrice *big.Int
excessBlobGas := b.ExcessBlobGas()
@@ -2184,70 +2182,55 @@ func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
// reorg takes two blocks, an old chain and a new chain and will reconstruct the
// blocks and inserts them to be part of the new canonical chain and accumulates
// potential missing transactions and post an event about them.
+//
// Note the new head block won't be processed here, callers need to handle it
// externally.
-func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
+func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Header) error {
var (
- newChain types.Blocks
- oldChain types.Blocks
- commonBlock *types.Block
-
- deletedTxs []common.Hash
- addedTxs []common.Hash
+ newChain []*types.Header
+ oldChain []*types.Header
+ commonBlock *types.Header
)
- oldBlock := bc.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
- if oldBlock == nil {
- return errors.New("current head block missing")
- }
- newBlock := newHead
-
// Reduce the longer chain to the same number as the shorter one
- if oldBlock.NumberU64() > newBlock.NumberU64() {
+ if oldHead.Number.Uint64() > newHead.Number.Uint64() {
// Old chain is longer, gather all transactions and logs as deleted ones
- for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
- oldChain = append(oldChain, oldBlock)
- for _, tx := range oldBlock.Transactions() {
- deletedTxs = append(deletedTxs, tx.Hash())
- }
+ for ; oldHead != nil && oldHead.Number.Uint64() != newHead.Number.Uint64(); oldHead = bc.GetHeader(oldHead.ParentHash, oldHead.Number.Uint64()-1) {
+ oldChain = append(oldChain, oldHead)
}
} else {
// New chain is longer, stash all blocks away for subsequent insertion
- for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
- newChain = append(newChain, newBlock)
+ for ; newHead != nil && newHead.Number.Uint64() != oldHead.Number.Uint64(); newHead = bc.GetHeader(newHead.ParentHash, newHead.Number.Uint64()-1) {
+ newChain = append(newChain, newHead)
}
}
- if oldBlock == nil {
+ if oldHead == nil {
return errInvalidOldChain
}
- if newBlock == nil {
+ if newHead == nil {
return errInvalidNewChain
}
// Both sides of the reorg are at the same number, reduce both until the common
// ancestor is found
for {
// If the common ancestor was found, bail out
- if oldBlock.Hash() == newBlock.Hash() {
- commonBlock = oldBlock
+ if oldHead.Hash() == newHead.Hash() {
+ commonBlock = oldHead
break
}
// Remove an old block as well as stash away a new block
- oldChain = append(oldChain, oldBlock)
- for _, tx := range oldBlock.Transactions() {
- deletedTxs = append(deletedTxs, tx.Hash())
- }
- newChain = append(newChain, newBlock)
+ oldChain = append(oldChain, oldHead)
+ newChain = append(newChain, newHead)
// Step back with both chains
- oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
- if oldBlock == nil {
+ oldHead = bc.GetHeader(oldHead.ParentHash, oldHead.Number.Uint64()-1)
+ if oldHead == nil {
return errInvalidOldChain
}
- newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
- if newBlock == nil {
+ newHead = bc.GetHeader(newHead.ParentHash, newHead.Number.Uint64()-1)
+ if newHead == nil {
return errInvalidNewChain
}
}
-
// Ensure the user sees large reorgs
if len(oldChain) > 0 && len(newChain) > 0 {
logFn := log.Info
@@ -2256,7 +2239,7 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
msg = "Large chain reorg detected"
logFn = log.Warn
}
- logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
+ logFn(msg, "number", commonBlock.Number, "hash", commonBlock.Hash(),
"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
blockReorgAddMeter.Mark(int64(len(newChain)))
blockReorgDropMeter.Mark(int64(len(oldChain)))
@@ -2264,55 +2247,112 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
} else if len(newChain) > 0 {
// Special case happens in the post merge stage that current head is
// the ancestor of new head while these two blocks are not consecutive
- log.Info("Extend chain", "add", len(newChain), "number", newChain[0].Number(), "hash", newChain[0].Hash())
+ log.Info("Extend chain", "add", len(newChain), "number", newChain[0].Number, "hash", newChain[0].Hash())
blockReorgAddMeter.Mark(int64(len(newChain)))
} else {
// len(newChain) == 0 && len(oldChain) > 0
// rewind the canonical chain to a lower point.
- log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
+ log.Error("Impossible reorg, please file an issue", "oldnum", oldHead.Number, "oldhash", oldHead.Hash(), "oldblocks", len(oldChain), "newnum", newHead.Number, "newhash", newHead.Hash(), "newblocks", len(newChain))
}
// Acquire the tx-lookup lock before mutation. This step is essential
// as the txlookups should be changed atomically, and all subsequent
// reads should be blocked until the mutation is complete.
bc.txLookupLock.Lock()
- // Insert the new chain segment in incremental order, from the old
- // to the new. The new chain head (newChain[0]) is not inserted here,
- // as it will be handled separately outside of this function
- for i := len(newChain) - 1; i >= 1; i-- {
- // Insert the block in the canonical way, re-writing history
- bc.writeHeadBlock(newChain[i])
+ // Reorg can be executed, start reducing the chain's old blocks and appending
+ // the new blocks
+ var (
+ deletedTxs []common.Hash
+ rebirthTxs []common.Hash
- // Collect the new added transactions.
- for _, tx := range newChain[i].Transactions() {
- addedTxs = append(addedTxs, tx.Hash())
+ deletedLogs []*types.Log
+ rebirthLogs []*types.Log
+ )
+ // Deleted log emission on the API uses forward order, which is incorrect, but
+ // we'll leave it in for legacy reasons.
+ //
+ // TODO(karalabe): This should be nuked out, no idea how, deprecate some APIs?
+ {
+ for i := len(oldChain) - 1; i >= 0; i-- {
+ block := bc.GetBlock(oldChain[i].Hash(), oldChain[i].Number.Uint64())
+ if block == nil {
+ return errInvalidOldChain // Corrupt database, mostly here to avoid weird panics
+ }
+ if logs := bc.collectLogs(block, true); len(logs) > 0 {
+ deletedLogs = append(deletedLogs, logs...)
+ }
+ if len(deletedLogs) > 512 {
+ bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
+ deletedLogs = nil
+ }
+ }
+ if len(deletedLogs) > 0 {
+ bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
}
}
+ // Undo old blocks in reverse order
+ for i := 0; i < len(oldChain); i++ {
+ // Collect all the deleted transactions
+ block := bc.GetBlock(oldChain[i].Hash(), oldChain[i].Number.Uint64())
+ if block == nil {
+ return errInvalidOldChain // Corrupt database, mostly here to avoid weird panics
+ }
+ for _, tx := range block.Transactions() {
+ deletedTxs = append(deletedTxs, tx.Hash())
+ }
+ // Collect deleted logs and emit them for new integrations
+ if logs := bc.collectLogs(block, true); len(logs) > 0 {
+ // Emit reversals latest first, older ones afterwards
+ slices.Reverse(logs)
+ // TODO(karalabe): Hook into the reverse emission part
+ }
+ }
+ // Apply new blocks in forward order
+ for i := len(newChain) - 1; i >= 1; i-- {
+ // Collect all the included transactions
+ block := bc.GetBlock(newChain[i].Hash(), newChain[i].Number.Uint64())
+ if block == nil {
+ return errInvalidNewChain // Corrupt database, mostly here to avoid weird panics
+ }
+ for _, tx := range block.Transactions() {
+ rebirthTxs = append(rebirthTxs, tx.Hash())
+ }
+ // Collect inserted logs and emit them
+ if logs := bc.collectLogs(block, false); len(logs) > 0 {
+ rebirthLogs = append(rebirthLogs, logs...)
+ }
+ if len(rebirthLogs) > 512 {
+ bc.logsFeed.Send(rebirthLogs)
+ rebirthLogs = nil
+ }
+ // Update the head block
+ bc.writeHeadBlock(block)
+ }
+ if len(rebirthLogs) > 0 {
+ bc.logsFeed.Send(rebirthLogs)
+ }
// Delete useless indexes right now which includes the non-canonical
// transaction indexes, canonical chain indexes which above the head.
- var (
- indexesBatch = bc.db.NewBatch()
- diffs = types.HashDifference(deletedTxs, addedTxs)
- )
- for _, tx := range diffs {
- rawdb.DeleteTxLookupEntry(indexesBatch, tx)
+ batch := bc.db.NewBatch()
+ for _, tx := range types.HashDifference(deletedTxs, rebirthTxs) {
+ rawdb.DeleteTxLookupEntry(batch, tx)
}
// Delete all hash markers that are not part of the new canonical chain.
// Because the reorg function does not handle new chain head, all hash
// markers greater than or equal to new chain head should be deleted.
- number := commonBlock.NumberU64()
+ number := commonBlock.Number
if len(newChain) > 1 {
- number = newChain[1].NumberU64()
+ number = newChain[1].Number
}
- for i := number + 1; ; i++ {
+ for i := number.Uint64() + 1; ; i++ {
hash := rawdb.ReadCanonicalHash(bc.db, i)
if hash == (common.Hash{}) {
break
}
- rawdb.DeleteCanonicalHash(indexesBatch, i)
+ rawdb.DeleteCanonicalHash(batch, i)
}
- if err := indexesBatch.Write(); err != nil {
+ if err := batch.Write(); err != nil {
log.Crit("Failed to delete useless indexes", "err", err)
}
// Reset the tx lookup cache to clear stale txlookup cache.
@@ -2321,43 +2361,6 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
// Release the tx-lookup lock after mutation.
bc.txLookupLock.Unlock()
- // Send out events for logs from the old canon chain, and 'reborn'
- // logs from the new canon chain. The number of logs can be very
- // high, so the events are sent in batches of size around 512.
-
- // Deleted logs + blocks:
- var deletedLogs []*types.Log
- for i := len(oldChain) - 1; i >= 0; i-- {
- // Also send event for blocks removed from the canon chain.
- bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
-
- // Collect deleted logs for notification
- if logs := bc.collectLogs(oldChain[i], true); len(logs) > 0 {
- deletedLogs = append(deletedLogs, logs...)
- }
- if len(deletedLogs) > 512 {
- bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
- deletedLogs = nil
- }
- }
- if len(deletedLogs) > 0 {
- bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
- }
-
- // New logs:
- var rebirthLogs []*types.Log
- for i := len(newChain) - 1; i >= 1; i-- {
- if logs := bc.collectLogs(newChain[i], false); len(logs) > 0 {
- rebirthLogs = append(rebirthLogs, logs...)
- }
- if len(rebirthLogs) > 512 {
- bc.logsFeed.Send(rebirthLogs)
- rebirthLogs = nil
- }
- }
- if len(rebirthLogs) > 0 {
- bc.logsFeed.Send(rebirthLogs)
- }
return nil
}
@@ -2395,7 +2398,7 @@ func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) {
// Run the reorg if necessary and set the given block as new head.
start := time.Now()
if head.ParentHash() != bc.CurrentBlock().Hash() {
- if err := bc.reorg(bc.CurrentBlock(), head); err != nil {
+ if err := bc.reorg(bc.CurrentBlock(), head.Header()); err != nil {
return common.Hash{}, err
}
}
@@ -2403,11 +2406,11 @@ func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) {
// Emit events
logs := bc.collectLogs(head, false)
- bc.chainFeed.Send(ChainEvent{Block: head, Hash: head.Hash(), Logs: logs})
+ bc.chainFeed.Send(ChainEvent{Header: head.Header()})
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
- bc.chainHeadFeed.Send(ChainHeadEvent{Block: head})
+ bc.chainHeadFeed.Send(ChainHeadEvent{Header: head.Header()})
context := []interface{}{
"number", head.Number(),
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index 6b8dffdcdc63..19c1b17f369c 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -430,11 +430,6 @@ func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Su
return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}
-// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
-func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
- return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
-}
-
// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return bc.scope.Track(bc.logsFeed.Subscribe(ch))
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index aeeb9095d87d..8a2dfe9f11f0 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -1767,7 +1767,6 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
@@ -1852,7 +1851,6 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
db, err = rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)
@@ -1974,7 +1972,6 @@ func testIssue23496(t *testing.T, scheme string) {
db, err = rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
index 123c2c9af16e..b72de3389636 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/blockchain_sethead_test.go
@@ -1971,7 +1971,6 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
index 3803c153e700..120977f222fc 100644
--- a/core/blockchain_snapshot_test.go
+++ b/core/blockchain_snapshot_test.go
@@ -68,7 +68,6 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
@@ -259,7 +258,6 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
newdb, err := rawdb.Open(rawdb.OpenOptions{
Directory: snaptest.datadir,
AncientsDirectory: snaptest.ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index f107b322b661..d8f7da0643ca 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -1332,85 +1332,6 @@ func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan Re
}
}
-func TestReorgSideEvent(t *testing.T) {
- testReorgSideEvent(t, rawdb.HashScheme)
- testReorgSideEvent(t, rawdb.PathScheme)
-}
-
-func testReorgSideEvent(t *testing.T, scheme string) {
- var (
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- gspec = &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
- }
- signer = types.LatestSigner(gspec.Config)
- )
- blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
- defer blockchain.Stop()
-
- _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {})
- if _, err := blockchain.InsertChain(chain); err != nil {
- t.Fatalf("failed to insert chain: %v", err)
- }
-
- _, replacementBlocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, gen *BlockGen) {
- tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, nil), signer, key1)
- if i == 2 {
- gen.OffsetTime(-9)
- }
- if err != nil {
- t.Fatalf("failed to create tx: %v", err)
- }
- gen.AddTx(tx)
- })
- chainSideCh := make(chan ChainSideEvent, 64)
- blockchain.SubscribeChainSideEvent(chainSideCh)
- if _, err := blockchain.InsertChain(replacementBlocks); err != nil {
- t.Fatalf("failed to insert chain: %v", err)
- }
-
- expectedSideHashes := map[common.Hash]bool{
- chain[0].Hash(): true,
- chain[1].Hash(): true,
- chain[2].Hash(): true,
- }
-
- i := 0
-
- const timeoutDura = 10 * time.Second
- timeout := time.NewTimer(timeoutDura)
-done:
- for {
- select {
- case ev := <-chainSideCh:
- block := ev.Block
- if _, ok := expectedSideHashes[block.Hash()]; !ok {
- t.Errorf("%d: didn't expect %x to be in side chain", i, block.Hash())
- }
- i++
-
- if i == len(expectedSideHashes) {
- timeout.Stop()
-
- break done
- }
- timeout.Reset(timeoutDura)
-
- case <-timeout.C:
- t.Fatalf("Timeout. Possibly not all blocks were triggered for sideevent: %v", i)
- }
- }
-
- // make sure no more events are fired
- select {
- case e := <-chainSideCh:
- t.Errorf("unexpected event fired: %v", e)
- case <-time.After(250 * time.Millisecond):
- }
-}
-
// Tests if the canonical block can be fetched from the database during chain insertion.
func TestCanonicalBlockRetrieval(t *testing.T) {
testCanonicalBlockRetrieval(t, rawdb.HashScheme)
@@ -2744,7 +2665,6 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
- Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
@@ -4311,3 +4231,36 @@ func TestPragueRequests(t *testing.T) {
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
}
+
+func BenchmarkReorg(b *testing.B) {
+ chainLength := b.N
+
+ dir := b.TempDir()
+ db, err := rawdb.NewLevelDBDatabase(dir, 128, 128, "", false)
+ if err != nil {
+ b.Fatalf("cannot create temporary database: %v", err)
+ }
+ defer db.Close()
+ gspec := &Genesis{
+ Config: params.TestChainConfig,
+ Alloc: types.GenesisAlloc{benchRootAddr: {Balance: math.BigPow(2, 254)}},
+ }
+ blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
+ defer blockchain.Stop()
+
+ // Insert an easy and a difficult chain afterwards
+ easyBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), db, chainLength, genValueTx(50000))
+ diffBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), db, chainLength, genValueTx(50000))
+
+ if _, err := blockchain.InsertChain(easyBlocks); err != nil {
+ b.Fatalf("failed to insert easy chain: %v", err)
+ }
+ b.ResetTimer()
+ if _, err := blockchain.InsertChain(diffBlocks); err != nil {
+ b.Fatalf("failed to insert difficult chain: %v", err)
+ }
+}
+
+// Master: BenchmarkReorg-8 10000 899591 ns/op 820154 B/op 1440 allocs/op 1549443072 bytes of heap used
+// WithoutOldChain: BenchmarkReorg-8 10000 1147281 ns/op 943163 B/op 1564 allocs/op 1163870208 bytes of heap used
+// WithoutNewChain: BenchmarkReorg-8 10000 1018922 ns/op 943580 B/op 1564 allocs/op 1171890176 bytes of heap used
diff --git a/core/chain_indexer.go b/core/chain_indexer.go
index f5fce7258831..2865daa1ff4d 100644
--- a/core/chain_indexer.go
+++ b/core/chain_indexer.go
@@ -222,20 +222,19 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainH
errc <- nil
return
}
- header := ev.Block.Header()
- if header.ParentHash != prevHash {
+ if ev.Header.ParentHash != prevHash {
// Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then)
// TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?
if rawdb.ReadCanonicalHash(c.chainDb, prevHeader.Number.Uint64()) != prevHash {
- if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
+ if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, ev.Header); h != nil {
c.newHead(h.Number.Uint64(), true)
}
}
}
- c.newHead(header.Number.Uint64(), false)
+ c.newHead(ev.Header.Number.Uint64(), false)
- prevHeader, prevHash = header, header.Hash()
+ prevHeader, prevHash = ev.Header, ev.Header.Hash()
}
}
}
diff --git a/core/events.go b/core/events.go
index ac935a137f5f..5ad2cb1f7b32 100644
--- a/core/events.go
+++ b/core/events.go
@@ -17,27 +17,19 @@
package core
import (
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// NewTxsEvent is posted when a batch of transactions enter the transaction pool.
type NewTxsEvent struct{ Txs []*types.Transaction }
-// NewMinedBlockEvent is posted when a block has been imported.
-type NewMinedBlockEvent struct{ Block *types.Block }
-
// RemovedLogsEvent is posted when a reorg happens
type RemovedLogsEvent struct{ Logs []*types.Log }
type ChainEvent struct {
- Block *types.Block
- Hash common.Hash
- Logs []*types.Log
+ Header *types.Header
}
-type ChainSideEvent struct {
- Block *types.Block
+type ChainHeadEvent struct {
+ Header *types.Header
}
-
-type ChainHeadEvent struct{ Block *types.Block }
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index 2d30af4b3d20..0b9dbe133561 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -388,10 +388,10 @@ func TestBlockReceiptStorage(t *testing.T) {
// Insert the receipt slice into the database and check presence
WriteReceipts(db, hash, 0, receipts)
if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) == 0 {
- t.Fatalf("no receipts returned")
+ t.Fatal("no receipts returned")
} else {
if err := checkReceiptsRLP(rs, receipts); err != nil {
- t.Fatalf(err.Error())
+ t.Fatal(err)
}
}
// Delete the body and ensure that the receipts are no longer returned (metadata can't be recomputed)
@@ -401,7 +401,7 @@ func TestBlockReceiptStorage(t *testing.T) {
}
// Ensure that receipts without metadata can be returned without the block body too
if err := checkReceiptsRLP(ReadRawReceipts(db, hash, 0), receipts); err != nil {
- t.Fatalf(err.Error())
+ t.Fatal(err)
}
// Sanity check that body alone without the receipt is a full purge
WriteBody(db, hash, 0, body)
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 13233406fe6c..e48e523f9e4d 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -319,8 +319,8 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string, r
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
-func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) {
- db, err := pebble.New(file, cache, handles, namespace, readonly, ephemeral)
+func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
+ db, err := pebble.New(file, cache, handles, namespace, readonly)
if err != nil {
return nil, err
}
@@ -358,9 +358,6 @@ type OpenOptions struct {
Cache int // the capacity(in megabytes) of the data caching
Handles int // number of files to be open simultaneously
ReadOnly bool
- // Ephemeral means that filesystem sync operations should be avoided: data integrity in the face of
- // a crash is not important. This option should typically be used in tests.
- Ephemeral bool
}
// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
@@ -382,7 +379,7 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
}
if o.Type == dbPebble || existingDb == dbPebble {
log.Info("Using pebble as the backing database")
- return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
+ return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
}
if o.Type == dbLeveldb || existingDb == dbLeveldb {
log.Info("Using leveldb as the backing database")
@@ -390,7 +387,7 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
}
// No pre-existing database, no user-requested one either. Default to Pebble.
log.Info("Defaulting to pebble as the backing database")
- return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
+ return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
}
// Open opens both a disk-based key-value database such as leveldb or pebble, but also
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index 29dfdf04fa6f..458e965a770a 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -282,7 +282,6 @@ func (sf *subfetcher) schedule(keys [][]byte, read bool) error {
// Append the tasks to the current queue
sf.lock.Lock()
for _, key := range keys {
- key := key // closure for the append below
sf.tasks = append(sf.tasks, &subfetcherTask{read: read, key: key})
}
sf.lock.Unlock()
diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go
index f0baf667cb6f..a21bb1577b08 100644
--- a/core/tracing/hooks.go
+++ b/core/tracing/hooks.go
@@ -55,9 +55,8 @@ type VMContext struct {
Time uint64
Random *common.Hash
// Effective tx gas price
- GasPrice *big.Int
- ChainConfig *params.ChainConfig
- StateDB StateDB
+ GasPrice *big.Int
+ StateDB StateDB
}
// BlockEvent is emitted upon tracing an incoming block.
diff --git a/core/txindexer.go b/core/txindexer.go
index 70fe5f33220f..b2f2188595f4 100644
--- a/core/txindexer.go
+++ b/core/txindexer.go
@@ -151,9 +151,9 @@ func (indexer *txIndexer) loop(chain *BlockChain) {
if done == nil {
stop = make(chan struct{})
done = make(chan struct{})
- go indexer.run(rawdb.ReadTxIndexTail(indexer.db), head.Block.NumberU64(), stop, done)
+ go indexer.run(rawdb.ReadTxIndexTail(indexer.db), head.Header.Number.Uint64(), stop, done)
}
- lastHead = head.Block.NumberU64()
+ lastHead = head.Header.Number.Uint64()
case <-done:
stop = nil
done = nil
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index be7435247d92..5ce69e37639d 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -243,7 +243,7 @@ func (p *TxPool) loop(head *types.Header, chain BlockChain) {
select {
case event := <-newHeadCh:
// Chain moved forward, store the head for later consumption
- newHead = event.Block.Header()
+ newHead = event.Header
case head := <-resetDone:
// Previous reset finished, update the old head and allow a new reset
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index eed13ee205bf..17a7dda3578c 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -546,9 +546,7 @@ func TestYParityJSONUnmarshalling(t *testing.T) {
DynamicFeeTxType,
BlobTxType,
} {
- txType := txType
for _, test := range tests {
- test := test
t.Run(fmt.Sprintf("txType=%d: %s", txType, test.name), func(t *testing.T) {
// Copy the base json
testJson := maps.Clone(baseJson)
diff --git a/core/vm/errors.go b/core/vm/errors.go
index 839bf56a1af1..e33c9fcb853c 100644
--- a/core/vm/errors.go
+++ b/core/vm/errors.go
@@ -56,7 +56,7 @@ func (e ErrStackUnderflow) Error() string {
}
func (e ErrStackUnderflow) Unwrap() error {
- return fmt.Errorf("stack underflow")
+ return errors.New("stack underflow")
}
// ErrStackOverflow wraps an evm error when the items on the stack exceeds
@@ -71,7 +71,7 @@ func (e ErrStackOverflow) Error() string {
}
func (e ErrStackOverflow) Unwrap() error {
- return fmt.Errorf("stack overflow")
+ return errors.New("stack overflow")
}
// ErrInvalidOpCode wraps an evm error when an invalid opcode is encountered.
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 616668d565cc..26ff495579f1 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -613,7 +613,6 @@ func (evm *EVM) GetVMContext() *tracing.VMContext {
Time: evm.Context.Time,
Random: evm.Context.Random,
GasPrice: evm.TxContext.GasPrice,
- ChainConfig: evm.ChainConfig(),
StateDB: evm.StateDB,
}
}
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index 9046dad5fef9..1aefc810bdb6 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -395,7 +395,7 @@ func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode
cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
cfg.GasLimit = gas
if len(tracerCode) > 0 {
- tracer, err := tracers.DefaultDirectory.New(tracerCode, new(tracers.Context), nil)
+ tracer, err := tracers.DefaultDirectory.New(tracerCode, new(tracers.Context), nil, cfg.ChainConfig)
if err != nil {
b.Fatal(err)
}
@@ -887,7 +887,7 @@ func TestRuntimeJSTracer(t *testing.T) {
statedb.SetCode(common.HexToAddress("0xee"), calleeCode)
statedb.SetCode(common.HexToAddress("0xff"), suicideCode)
- tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil)
+ tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil, params.MergedTestChainConfig)
if err != nil {
t.Fatal(err)
}
@@ -922,7 +922,7 @@ func TestJSTracerCreateTx(t *testing.T) {
code := []byte{byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN)}
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
- tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil)
+ tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil, params.MergedTestChainConfig)
if err != nil {
t.Fatal(err)
}
diff --git a/crypto/signature_nocgo.go b/crypto/signature_nocgo.go
index 5ac3765c7106..16a785a18600 100644
--- a/crypto/signature_nocgo.go
+++ b/crypto/signature_nocgo.go
@@ -25,8 +25,8 @@ import (
"fmt"
"math/big"
- "github.com/btcsuite/btcd/btcec/v2"
- btc_ecdsa "github.com/btcsuite/btcd/btcec/v2/ecdsa"
+ "github.com/decred/dcrd/dcrec/secp256k1/v4"
+ decred_ecdsa "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
)
// Ecrecover returns the uncompressed public key that created the given signature.
@@ -39,16 +39,16 @@ func Ecrecover(hash, sig []byte) ([]byte, error) {
return bytes, err
}
-func sigToPub(hash, sig []byte) (*btcec.PublicKey, error) {
+func sigToPub(hash, sig []byte) (*secp256k1.PublicKey, error) {
if len(sig) != SignatureLength {
return nil, errors.New("invalid signature")
}
- // Convert to btcec input format with 'recovery id' v at the beginning.
+ // Convert to secp256k1 input format with 'recovery id' v at the beginning.
btcsig := make([]byte, SignatureLength)
btcsig[0] = sig[RecoveryIDOffset] + 27
copy(btcsig[1:], sig)
- pub, _, err := btc_ecdsa.RecoverCompact(btcsig, hash)
+ pub, _, err := decred_ecdsa.RecoverCompact(btcsig, hash)
return pub, err
}
@@ -82,13 +82,13 @@ func Sign(hash []byte, prv *ecdsa.PrivateKey) ([]byte, error) {
if prv.Curve != S256() {
return nil, errors.New("private key curve is not secp256k1")
}
- // ecdsa.PrivateKey -> btcec.PrivateKey
- var priv btcec.PrivateKey
+ // ecdsa.PrivateKey -> secp256k1.PrivateKey
+ var priv secp256k1.PrivateKey
if overflow := priv.Key.SetByteSlice(prv.D.Bytes()); overflow || priv.Key.IsZero() {
return nil, errors.New("invalid private key")
}
defer priv.Zero()
- sig := btc_ecdsa.SignCompact(&priv, hash, false) // ref uncompressed pubkey
+ sig := decred_ecdsa.SignCompact(&priv, hash, false) // ref uncompressed pubkey
// Convert to Ethereum signature format with 'recovery id' v at the end.
v := sig[0] - 27
copy(sig, sig[1:])
@@ -103,19 +103,19 @@ func VerifySignature(pubkey, hash, signature []byte) bool {
if len(signature) != 64 {
return false
}
- var r, s btcec.ModNScalar
+ var r, s secp256k1.ModNScalar
if r.SetByteSlice(signature[:32]) {
return false // overflow
}
if s.SetByteSlice(signature[32:]) {
return false
}
- sig := btc_ecdsa.NewSignature(&r, &s)
- key, err := btcec.ParsePubKey(pubkey)
+ sig := decred_ecdsa.NewSignature(&r, &s)
+ key, err := secp256k1.ParsePubKey(pubkey)
if err != nil {
return false
}
- // Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
+ // Reject malleable signatures. libsecp256k1 does this check but decred doesn't.
if s.IsOverHalfOrder() {
return false
}
@@ -127,7 +127,7 @@ func DecompressPubkey(pubkey []byte) (*ecdsa.PublicKey, error) {
if len(pubkey) != 33 {
return nil, errors.New("invalid compressed public key length")
}
- key, err := btcec.ParsePubKey(pubkey)
+ key, err := secp256k1.ParsePubKey(pubkey)
if err != nil {
return nil, err
}
@@ -148,20 +148,20 @@ func DecompressPubkey(pubkey []byte) (*ecdsa.PublicKey, error) {
// when constructing a PrivateKey.
func CompressPubkey(pubkey *ecdsa.PublicKey) []byte {
// NOTE: the coordinates may be validated with
- // btcec.ParsePubKey(FromECDSAPub(pubkey))
- var x, y btcec.FieldVal
+ // secp256k1.ParsePubKey(FromECDSAPub(pubkey))
+ var x, y secp256k1.FieldVal
x.SetByteSlice(pubkey.X.Bytes())
y.SetByteSlice(pubkey.Y.Bytes())
- return btcec.NewPublicKey(&x, &y).SerializeCompressed()
+ return secp256k1.NewPublicKey(&x, &y).SerializeCompressed()
}
// S256 returns an instance of the secp256k1 curve.
func S256() EllipticCurve {
- return btCurve{btcec.S256()}
+ return btCurve{secp256k1.S256()}
}
type btCurve struct {
- *btcec.KoblitzCurve
+ *secp256k1.KoblitzCurve
}
// Marshal converts a point given as (x, y) into a byte slice.
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 8a9898b956f3..4e81d68e078f 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -275,10 +275,6 @@ func (b *EthAPIBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) e
return b.eth.BlockChain().SubscribeChainHeadEvent(ch)
}
-func (b *EthAPIBackend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription {
- return b.eth.BlockChain().SubscribeChainSideEvent(ch)
-}
-
func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return b.eth.BlockChain().SubscribeLogsEvent(ch)
}
diff --git a/eth/backend.go b/eth/backend.go
index f10d99c3a70b..663b0e5fe73d 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -197,7 +197,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
)
if config.VMTrace != "" {
- var traceConfig json.RawMessage
+ traceConfig := json.RawMessage("{}")
if config.VMTraceJsonConfig != "" {
traceConfig = json.RawMessage(config.VMTraceJsonConfig)
}
diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go
index c3116cb4b6a7..d4069e50e6bd 100644
--- a/eth/catalyst/api_test.go
+++ b/eth/catalyst/api_test.go
@@ -1600,7 +1600,7 @@ func TestBlockToPayloadWithBlobs(t *testing.T) {
}
block := types.NewBlock(&header, &types.Body{Transactions: txs}, nil, trie.NewStackTrie(nil))
- envelope := engine.BlockToExecutableData(block, nil, sidecars)
+ envelope := engine.BlockToExecutableData(block, nil, sidecars, nil)
var want int
for _, tx := range txs {
want += len(tx.BlobHashes())
diff --git a/eth/catalyst/simulated_beacon_test.go b/eth/catalyst/simulated_beacon_test.go
index 7db176923466..7e9fd7b32453 100644
--- a/eth/catalyst/simulated_beacon_test.go
+++ b/eth/catalyst/simulated_beacon_test.go
@@ -123,16 +123,16 @@ func TestSimulatedBeaconSendWithdrawals(t *testing.T) {
timer := time.NewTimer(12 * time.Second)
for {
select {
- case evt := <-chainHeadCh:
- for _, includedTx := range evt.Block.Transactions() {
+ case ev := <-chainHeadCh:
+ block := ethService.BlockChain().GetBlock(ev.Header.Hash(), ev.Header.Number.Uint64())
+ for _, includedTx := range block.Transactions() {
includedTxs[includedTx.Hash()] = struct{}{}
}
- for _, includedWithdrawal := range evt.Block.Withdrawals() {
+ for _, includedWithdrawal := range block.Withdrawals() {
includedWithdrawals = append(includedWithdrawals, includedWithdrawal.Index)
}
-
// ensure all withdrawals/txs included. this will take two blocks b/c number of withdrawals > 10
- if len(includedTxs) == len(txs) && len(includedWithdrawals) == len(withdrawals) && evt.Block.Number().Cmp(big.NewInt(2)) == 0 {
+ if len(includedTxs) == len(txs) && len(includedWithdrawals) == len(withdrawals) && ev.Header.Number.Cmp(big.NewInt(2)) == 0 {
return
}
case <-timer.C:
@@ -186,11 +186,12 @@ func TestOnDemandSpam(t *testing.T) {
)
for {
select {
- case evt := <-chainHeadCh:
- for _, itx := range evt.Block.Transactions() {
+ case ev := <-chainHeadCh:
+ block := eth.BlockChain().GetBlock(ev.Header.Hash(), ev.Header.Number.Uint64())
+ for _, itx := range block.Transactions() {
includedTxs[itx.Hash()] = struct{}{}
}
- for _, iwx := range evt.Block.Withdrawals() {
+ for _, iwx := range block.Withdrawals() {
includedWxs = append(includedWxs, iwx.Index)
}
// ensure all withdrawals/txs included. this will take two blocks b/c number of withdrawals > 10
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index d147414859f2..fadb68ef03c1 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -541,7 +541,6 @@ func (d *Downloader) spawnSync(fetchers []func() error) error {
errc := make(chan error, len(fetchers))
d.cancelWg.Add(len(fetchers))
for _, fn := range fetchers {
- fn := fn
go func() { defer d.cancelWg.Done(); errc <- fn() }()
}
// Wait for the first error, then terminate the others.
diff --git a/eth/filters/api.go b/eth/filters/api.go
index 23fb1faca896..f46dd39dd8c0 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -273,7 +273,6 @@ func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subsc
select {
case logs := <-matchedLogs:
for _, log := range logs {
- log := log
notifier.Notify(rpcSub.ID, &log)
}
case <-rpcSub.Err(): // client send an unsubscribe request
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index a3a2787a4144..86012b3f9a8b 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -391,7 +391,7 @@ func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent)
func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) {
for _, f := range filters[BlocksSubscription] {
- f.headers <- ev.Block.Header()
+ f.headers <- ev.Header
}
}
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index 1d52afb28243..aec5ee41663d 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -200,7 +200,7 @@ func TestBlockSubscription(t *testing.T) {
)
for _, blk := range chain {
- chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
+ chainEvents = append(chainEvents, core.ChainEvent{Header: blk.Header()})
}
chan0 := make(chan *types.Header)
@@ -213,13 +213,13 @@ func TestBlockSubscription(t *testing.T) {
for i1 != len(chainEvents) || i2 != len(chainEvents) {
select {
case header := <-chan0:
- if chainEvents[i1].Hash != header.Hash() {
- t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
+ if chainEvents[i1].Header.Hash() != header.Hash() {
+ t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Header.Hash(), header.Hash())
}
i1++
case header := <-chan1:
- if chainEvents[i2].Hash != header.Hash() {
- t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
+ if chainEvents[i2].Header.Hash() != header.Hash() {
+ t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Header.Hash(), header.Hash())
}
i2++
}
diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go
index 19a6c0010a60..fe2e4d408aca 100644
--- a/eth/gasprice/gasprice.go
+++ b/eth/gasprice/gasprice.go
@@ -124,10 +124,10 @@ func NewOracle(backend OracleBackend, params Config, startPrice *big.Int) *Oracl
go func() {
var lastHead common.Hash
for ev := range headEvent {
- if ev.Block.ParentHash() != lastHead {
+ if ev.Header.ParentHash != lastHead {
cache.Purge()
}
- lastHead = ev.Block.Hash()
+ lastHead = ev.Header.Hash()
}
}()
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index c41c9abc267f..55f7da87dde0 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -390,8 +390,6 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
}
// Interconnect all the sink handlers with the source handler
for i, sink := range sinks {
- sink := sink // Closure for goroutine below
-
sourcePipe, sinkPipe := p2p.MsgPipe()
defer sourcePipe.Close()
defer sinkPipe.Close()
diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go
index 6eb0d04f6ba8..dc32559c47b9 100644
--- a/eth/protocols/eth/handler.go
+++ b/eth/protocols/eth/handler.go
@@ -93,8 +93,6 @@ type TxPool interface {
func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol {
protocols := make([]p2p.Protocol, 0, len(ProtocolVersions))
for _, version := range ProtocolVersions {
- version := version // Closure
-
protocols = append(protocols, p2p.Protocol{
Name: ProtocolName,
Version: version,
diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go
index a6c60bc0757f..d36f9621b13b 100644
--- a/eth/protocols/snap/handler.go
+++ b/eth/protocols/snap/handler.go
@@ -85,8 +85,6 @@ type Backend interface {
func MakeProtocols(backend Backend) []p2p.Protocol {
protocols := make([]p2p.Protocol, len(ProtocolVersions))
for i, version := range ProtocolVersions {
- version := version // Closure
-
protocols[i] = p2p.Protocol{
Name: ProtocolName,
Version: version,
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index cdd03e6a0c48..9e079f540f07 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -345,7 +345,6 @@ func (task *accountTask) activeSubTasks() map[common.Hash][]*storageTask {
last = task.res.hashes[len(task.res.hashes)-1]
)
for hash, subTasks := range task.SubTasks {
- subTasks := subTasks // closure
if hash.Cmp(last) <= 0 {
tasks[hash] = subTasks
}
@@ -765,8 +764,6 @@ func (s *Syncer) loadSyncStatus() {
}
s.tasks = progress.Tasks
for _, task := range s.tasks {
- task := task // closure for task.genBatch in the stacktrie writer callback
-
// Restore the completed storages
task.stateCompleted = make(map[common.Hash]struct{})
for _, hash := range task.StorageCompleted {
@@ -790,8 +787,6 @@ func (s *Syncer) loadSyncStatus() {
// Restore leftover storage tasks
for accountHash, subtasks := range task.SubTasks {
for _, subtask := range subtasks {
- subtask := subtask // closure for subtask.genBatch in the stacktrie writer callback
-
subtask.genBatch = ethdb.HookedBatch{
Batch: s.db.NewBatch(),
OnPut: func(key []byte, value []byte) {
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index 189afa48d4b3..5b6945f54f0c 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -1011,7 +1011,7 @@ func (api *API) traceTx(ctx context.Context, tx *types.Transaction, message *cor
Stop: logger.Stop,
}
} else {
- tracer, err = DefaultDirectory.New(*config.Tracer, txctx, config.TracerConfig)
+ tracer, err = DefaultDirectory.New(*config.Tracer, txctx, config.TracerConfig, api.backend.ChainConfig())
if err != nil {
return nil, err
}
diff --git a/eth/tracers/dir.go b/eth/tracers/dir.go
index 650815350b37..55bcb44d23ad 100644
--- a/eth/tracers/dir.go
+++ b/eth/tracers/dir.go
@@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/tracing"
+ "github.com/ethereum/go-ethereum/params"
)
// Context contains some contextual infos for a transaction execution that is not
@@ -44,8 +45,8 @@ type Tracer struct {
Stop func(err error)
}
-type ctorFn func(*Context, json.RawMessage) (*Tracer, error)
-type jsCtorFn func(string, *Context, json.RawMessage) (*Tracer, error)
+type ctorFn func(*Context, json.RawMessage, *params.ChainConfig) (*Tracer, error)
+type jsCtorFn func(string, *Context, json.RawMessage, *params.ChainConfig) (*Tracer, error)
type elem struct {
ctor ctorFn
@@ -78,12 +79,15 @@ func (d *directory) RegisterJSEval(f jsCtorFn) {
// New returns a new instance of a tracer, by iterating through the
// registered lookups. Name is either name of an existing tracer
// or an arbitrary JS code.
-func (d *directory) New(name string, ctx *Context, cfg json.RawMessage) (*Tracer, error) {
+func (d *directory) New(name string, ctx *Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*Tracer, error) {
+ if len(cfg) == 0 {
+ cfg = json.RawMessage("{}")
+ }
if elem, ok := d.elems[name]; ok {
- return elem.ctor(ctx, cfg)
+ return elem.ctor(ctx, cfg, chainConfig)
}
// Assume JS code
- return d.jsEval(name, ctx, cfg)
+ return d.jsEval(name, ctx, cfg, chainConfig)
}
// IsJS will return true if the given tracer will evaluate
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index 31b2ef6d16ee..d21e589f3dfc 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -96,7 +96,6 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
if !strings.HasSuffix(file.Name(), ".json") {
continue
}
- file := file // capture range variable
t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
t.Parallel()
@@ -121,7 +120,7 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
)
state.Close()
- tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
+ tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig, test.Genesis.Config)
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
@@ -183,7 +182,6 @@ func BenchmarkTracers(b *testing.B) {
if !strings.HasSuffix(file.Name(), ".json") {
continue
}
- file := file // capture range variable
b.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(b *testing.B) {
blob, err := os.ReadFile(filepath.Join("testdata", "call_tracer", file.Name()))
if err != nil {
@@ -229,7 +227,7 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
- tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), nil)
+ tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), nil, test.Genesis.Config)
if err != nil {
b.Fatalf("failed to create call tracer: %v", err)
}
@@ -266,7 +264,7 @@ func TestInternals(t *testing.T) {
}
)
mkTracer := func(name string, cfg json.RawMessage) *tracers.Tracer {
- tr, err := tracers.DefaultDirectory.New(name, nil, cfg)
+ tr, err := tracers.DefaultDirectory.New(name, nil, cfg, config)
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
diff --git a/eth/tracers/internal/tracetest/flat_calltrace_test.go b/eth/tracers/internal/tracetest/flat_calltrace_test.go
index ec7a944b91de..7a6e1751e87d 100644
--- a/eth/tracers/internal/tracetest/flat_calltrace_test.go
+++ b/eth/tracers/internal/tracetest/flat_calltrace_test.go
@@ -89,7 +89,7 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string
defer state.Close()
// Create the tracer, the EVM environment and run it
- tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
+ tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig, test.Genesis.Config)
if err != nil {
return fmt.Errorf("failed to create call tracer: %v", err)
}
@@ -151,7 +151,6 @@ func testFlatCallTracer(tracerName string, dirPath string, t *testing.T) {
if !strings.HasSuffix(file.Name(), ".json") {
continue
}
- file := file // capture range variable
t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
t.Parallel()
diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go
index 9cbd12669489..90f59225dfd0 100644
--- a/eth/tracers/internal/tracetest/prestate_test.go
+++ b/eth/tracers/internal/tracetest/prestate_test.go
@@ -73,7 +73,6 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
if !strings.HasSuffix(file.Name(), ".json") {
continue
}
- file := file // capture range variable
t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
t.Parallel()
@@ -98,7 +97,7 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
)
defer state.Close()
- tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
+ tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig, test.Genesis.Config)
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
diff --git a/eth/tracers/js/goja.go b/eth/tracers/js/goja.go
index b823ef740a86..d54752ef216f 100644
--- a/eth/tracers/js/goja.go
+++ b/eth/tracers/js/goja.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/eth/tracers/internal"
+ "github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
"github.com/ethereum/go-ethereum/common"
@@ -46,10 +47,10 @@ func init() {
if err != nil {
panic(err)
}
- type ctorFn = func(*tracers.Context, json.RawMessage) (*tracers.Tracer, error)
+ type ctorFn = func(*tracers.Context, json.RawMessage, *params.ChainConfig) (*tracers.Tracer, error)
lookup := func(code string) ctorFn {
- return func(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
- return newJsTracer(code, ctx, cfg)
+ return func(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
+ return newJsTracer(code, ctx, cfg, chainConfig)
}
}
for name, code := range assetTracers {
@@ -102,6 +103,7 @@ func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString b
type jsTracer struct {
vm *goja.Runtime
env *tracing.VMContext
+ chainConfig *params.ChainConfig
toBig toBigFn // Converts a hex string into a JS bigint
toBuf toBufFn // Converts a []byte into a JS buffer
fromBuf fromBufFn // Converts an array, hex string or Uint8Array to a []byte
@@ -138,13 +140,14 @@ type jsTracer struct {
// The methods `result` and `fault` are required to be present.
// The methods `step`, `enter`, and `exit` are optional, but note that
// `enter` and `exit` always go together.
-func newJsTracer(code string, ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
+func newJsTracer(code string, ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
vm := goja.New()
// By default field names are exported to JS as is, i.e. capitalized.
vm.SetFieldNameMapper(goja.UncapFieldNameMapper())
t := &jsTracer{
- vm: vm,
- ctx: make(map[string]goja.Value),
+ vm: vm,
+ ctx: make(map[string]goja.Value),
+ chainConfig: chainConfig,
}
t.setTypeConverters()
@@ -244,7 +247,7 @@ func (t *jsTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from
db := &dbObj{db: env.StateDB, vm: t.vm, toBig: t.toBig, toBuf: t.toBuf, fromBuf: t.fromBuf}
t.dbValue = db.setupObject()
// Update list of precompiles based on current block
- rules := env.ChainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
+ rules := t.chainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
t.activePrecompiles = vm.ActivePrecompiles(rules)
t.ctx["block"] = t.vm.ToValue(t.env.BlockNumber.Uint64())
t.ctx["gas"] = t.vm.ToValue(tx.Gas())
diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go
index 7122b3c90e2c..ed2789d70dde 100644
--- a/eth/tracers/js/tracer_test.go
+++ b/eth/tracers/js/tracer_test.go
@@ -90,11 +90,12 @@ func runTrace(tracer *tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainCo
func TestTracer(t *testing.T) {
execTracer := func(code string, contract []byte) ([]byte, string) {
t.Helper()
- tracer, err := newJsTracer(code, nil, nil)
+ chainConfig := params.TestChainConfig
+ tracer, err := newJsTracer(code, nil, nil, chainConfig)
if err != nil {
t.Fatal(err)
}
- ret, err := runTrace(tracer, testCtx(), params.TestChainConfig, contract)
+ ret, err := runTrace(tracer, testCtx(), chainConfig, contract)
if err != nil {
return nil, err.Error() // Stringify to allow comparison without nil checks
}
@@ -167,7 +168,8 @@ func TestTracer(t *testing.T) {
func TestHalt(t *testing.T) {
timeout := errors.New("stahp")
- tracer, err := newJsTracer("{step: function() { while(1); }, result: function() { return null; }, fault: function(){}}", nil, nil)
+ chainConfig := params.TestChainConfig
+ tracer, err := newJsTracer("{step: function() { while(1); }, result: function() { return null; }, fault: function(){}}", nil, nil, chainConfig)
if err != nil {
t.Fatal(err)
}
@@ -175,20 +177,21 @@ func TestHalt(t *testing.T) {
time.Sleep(1 * time.Second)
tracer.Stop(timeout)
}()
- if _, err = runTrace(tracer, testCtx(), params.TestChainConfig, nil); !strings.Contains(err.Error(), "stahp") {
+ if _, err = runTrace(tracer, testCtx(), chainConfig, nil); !strings.Contains(err.Error(), "stahp") {
t.Errorf("Expected timeout error, got %v", err)
}
}
func TestHaltBetweenSteps(t *testing.T) {
- tracer, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }}", nil, nil)
+ chainConfig := params.TestChainConfig
+ tracer, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }}", nil, nil, chainConfig)
if err != nil {
t.Fatal(err)
}
scope := &vm.ScopeContext{
Contract: vm.NewContract(&account{}, &account{}, uint256.NewInt(0), 0),
}
- env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(1)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Tracer: tracer.Hooks})
+ env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(1)}, &dummyStatedb{}, chainConfig, vm.Config{Tracer: tracer.Hooks})
tracer.OnTxStart(env.GetVMContext(), types.NewTx(&types.LegacyTx{}), common.Address{})
tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, []byte{}, 0, big.NewInt(0))
tracer.OnOpcode(0, 0, 0, 0, scope, nil, 0, nil)
@@ -206,11 +209,12 @@ func TestHaltBetweenSteps(t *testing.T) {
func TestNoStepExec(t *testing.T) {
execTracer := func(code string) []byte {
t.Helper()
- tracer, err := newJsTracer(code, nil, nil)
+ chainConfig := params.TestChainConfig
+ tracer, err := newJsTracer(code, nil, nil, chainConfig)
if err != nil {
t.Fatal(err)
}
- env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(100)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Tracer: tracer.Hooks})
+ env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(100)}, &dummyStatedb{}, chainConfig, vm.Config{Tracer: tracer.Hooks})
tracer.OnTxStart(env.GetVMContext(), types.NewTx(&types.LegacyTx{}), common.Address{})
tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, []byte{}, 1000, big.NewInt(0))
tracer.OnExit(0, nil, 0, nil, false)
@@ -241,7 +245,7 @@ func TestIsPrecompile(t *testing.T) {
chaincfg.IstanbulBlock = big.NewInt(200)
chaincfg.BerlinBlock = big.NewInt(300)
txCtx := vm.TxContext{GasPrice: big.NewInt(100000)}
- tracer, err := newJsTracer("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", nil, nil)
+ tracer, err := newJsTracer("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", nil, nil, chaincfg)
if err != nil {
t.Fatal(err)
}
@@ -255,7 +259,7 @@ func TestIsPrecompile(t *testing.T) {
t.Errorf("tracer should not consider blake2f as precompile in byzantium")
}
- tracer, _ = newJsTracer("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", nil, nil)
+ tracer, _ = newJsTracer("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", nil, nil, chaincfg)
blockCtx = vm.BlockContext{BlockNumber: big.NewInt(250)}
res, err = runTrace(tracer, &vmContext{blockCtx, txCtx}, chaincfg, nil)
if err != nil {
@@ -267,15 +271,16 @@ func TestIsPrecompile(t *testing.T) {
}
func TestEnterExit(t *testing.T) {
+ chainConfig := params.TestChainConfig
// test that either both or none of enter() and exit() are defined
- if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}}", new(tracers.Context), nil); err == nil {
+ if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}}", new(tracers.Context), nil, chainConfig); err == nil {
t.Fatal("tracer creation should've failed without exit() definition")
}
- if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}, exit: function() {}}", new(tracers.Context), nil); err != nil {
+ if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}, exit: function() {}}", new(tracers.Context), nil, chainConfig); err != nil {
t.Fatal(err)
}
// test that the enter and exit method are correctly invoked and the values passed
- tracer, err := newJsTracer("{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, step: function() {}, fault: function() {}, result: function() { return {enters: this.enters, exits: this.exits, enterGas: this.enterGas, gasUsed: this.gasUsed} }, enter: function(frame) { this.enters++; this.enterGas = frame.getGas(); }, exit: function(res) { this.exits++; this.gasUsed = res.getGasUsed(); }}", new(tracers.Context), nil)
+ tracer, err := newJsTracer("{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, step: function() {}, fault: function() {}, result: function() { return {enters: this.enters, exits: this.exits, enterGas: this.enterGas, gasUsed: this.gasUsed} }, enter: function(frame) { this.enters++; this.enterGas = frame.getGas(); }, exit: function(res) { this.exits++; this.gasUsed = res.getGasUsed(); }}", new(tracers.Context), nil, chainConfig)
if err != nil {
t.Fatal(err)
}
@@ -297,7 +302,8 @@ func TestEnterExit(t *testing.T) {
func TestSetup(t *testing.T) {
// Test empty config
- _, err := newJsTracer(`{setup: function(cfg) { if (cfg !== "{}") { throw("invalid empty config") } }, fault: function() {}, result: function() {}}`, new(tracers.Context), nil)
+ chainConfig := params.TestChainConfig
+ _, err := newJsTracer(`{setup: function(cfg) { if (cfg !== "{}") { throw("invalid empty config") } }, fault: function() {}, result: function() {}}`, new(tracers.Context), nil, chainConfig)
if err != nil {
t.Error(err)
}
@@ -307,12 +313,12 @@ func TestSetup(t *testing.T) {
t.Fatal(err)
}
// Test no setup func
- _, err = newJsTracer(`{fault: function() {}, result: function() {}}`, new(tracers.Context), cfg)
+ _, err = newJsTracer(`{fault: function() {}, result: function() {}}`, new(tracers.Context), cfg, chainConfig)
if err != nil {
t.Fatal(err)
}
// Test config value
- tracer, err := newJsTracer("{config: null, setup: function(cfg) { this.config = JSON.parse(cfg) }, step: function() {}, fault: function() {}, result: function() { return this.config.foo }}", new(tracers.Context), cfg)
+ tracer, err := newJsTracer("{config: null, setup: function(cfg) { this.config = JSON.parse(cfg) }, step: function() {}, fault: function() {}, result: function() { return this.config.foo }}", new(tracers.Context), cfg, chainConfig)
if err != nil {
t.Fatal(err)
}
diff --git a/eth/tracers/live.go b/eth/tracers/live.go
index ffb2303af4f1..8b222d2e6cdf 100644
--- a/eth/tracers/live.go
+++ b/eth/tracers/live.go
@@ -1,3 +1,19 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
package tracers
import (
@@ -24,6 +40,9 @@ func (d *liveDirectory) Register(name string, f ctorFunc) {
// New instantiates a tracer by name.
func (d *liveDirectory) New(name string, config json.RawMessage) (*tracing.Hooks, error) {
+ if len(config) == 0 {
+ config = json.RawMessage("{}")
+ }
if f, ok := d.elems[name]; ok {
return f(config)
}
diff --git a/eth/tracers/live/noop.go b/eth/tracers/live/noop.go
index 7433c288408f..46c5700d2515 100644
--- a/eth/tracers/live/noop.go
+++ b/eth/tracers/live/noop.go
@@ -1,3 +1,19 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
package live
import (
diff --git a/eth/tracers/live/supply.go b/eth/tracers/live/supply.go
index 96f70594548c..fa4e5b190431 100644
--- a/eth/tracers/live/supply.go
+++ b/eth/tracers/live/supply.go
@@ -1,3 +1,19 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
package live
import (
@@ -19,7 +35,7 @@ import (
)
func init() {
- tracers.LiveDirectory.Register("supply", newSupply)
+ tracers.LiveDirectory.Register("supply", newSupplyTracer)
}
type supplyInfoIssuance struct {
@@ -63,7 +79,7 @@ type supplyTxCallstack struct {
burn *big.Int
}
-type supply struct {
+type supplyTracer struct {
delta supplyInfo
txCallstack []supplyTxCallstack // Callstack for current transaction
logger *lumberjack.Logger
@@ -74,12 +90,10 @@ type supplyTracerConfig struct {
MaxSize int `json:"maxSize"` // MaxSize is the maximum size in megabytes of the tracer log file before it gets rotated. It defaults to 100 megabytes.
}
-func newSupply(cfg json.RawMessage) (*tracing.Hooks, error) {
+func newSupplyTracer(cfg json.RawMessage) (*tracing.Hooks, error) {
var config supplyTracerConfig
- if cfg != nil {
- if err := json.Unmarshal(cfg, &config); err != nil {
- return nil, fmt.Errorf("failed to parse config: %v", err)
- }
+ if err := json.Unmarshal(cfg, &config); err != nil {
+ return nil, fmt.Errorf("failed to parse config: %v", err)
}
if config.Path == "" {
return nil, errors.New("supply tracer output path is required")
@@ -93,19 +107,19 @@ func newSupply(cfg json.RawMessage) (*tracing.Hooks, error) {
logger.MaxSize = config.MaxSize
}
- t := &supply{
+ t := &supplyTracer{
delta: newSupplyInfo(),
logger: logger,
}
return &tracing.Hooks{
- OnBlockStart: t.OnBlockStart,
- OnBlockEnd: t.OnBlockEnd,
- OnGenesisBlock: t.OnGenesisBlock,
- OnTxStart: t.OnTxStart,
- OnBalanceChange: t.OnBalanceChange,
- OnEnter: t.OnEnter,
- OnExit: t.OnExit,
- OnClose: t.OnClose,
+ OnBlockStart: t.onBlockStart,
+ OnBlockEnd: t.onBlockEnd,
+ OnGenesisBlock: t.onGenesisBlock,
+ OnTxStart: t.onTxStart,
+ OnBalanceChange: t.onBalanceChange,
+ OnEnter: t.onEnter,
+ OnExit: t.onExit,
+ OnClose: t.onClose,
}, nil
}
@@ -128,11 +142,11 @@ func newSupplyInfo() supplyInfo {
}
}
-func (s *supply) resetDelta() {
+func (s *supplyTracer) resetDelta() {
s.delta = newSupplyInfo()
}
-func (s *supply) OnBlockStart(ev tracing.BlockEvent) {
+func (s *supplyTracer) onBlockStart(ev tracing.BlockEvent) {
s.resetDelta()
s.delta.Number = ev.Block.NumberU64()
@@ -155,11 +169,11 @@ func (s *supply) OnBlockStart(ev tracing.BlockEvent) {
}
}
-func (s *supply) OnBlockEnd(err error) {
+func (s *supplyTracer) onBlockEnd(err error) {
s.write(s.delta)
}
-func (s *supply) OnGenesisBlock(b *types.Block, alloc types.GenesisAlloc) {
+func (s *supplyTracer) onGenesisBlock(b *types.Block, alloc types.GenesisAlloc) {
s.resetDelta()
s.delta.Number = b.NumberU64()
@@ -174,7 +188,7 @@ func (s *supply) OnGenesisBlock(b *types.Block, alloc types.GenesisAlloc) {
s.write(s.delta)
}
-func (s *supply) OnBalanceChange(a common.Address, prevBalance, newBalance *big.Int, reason tracing.BalanceChangeReason) {
+func (s *supplyTracer) onBalanceChange(a common.Address, prevBalance, newBalance *big.Int, reason tracing.BalanceChangeReason) {
diff := new(big.Int).Sub(newBalance, prevBalance)
// NOTE: don't handle "BalanceIncreaseGenesisBalance" because it is handled in OnGenesisBlock
@@ -193,12 +207,12 @@ func (s *supply) OnBalanceChange(a common.Address, prevBalance, newBalance *big.
}
}
-func (s *supply) OnTxStart(vm *tracing.VMContext, tx *types.Transaction, from common.Address) {
+func (s *supplyTracer) onTxStart(vm *tracing.VMContext, tx *types.Transaction, from common.Address) {
s.txCallstack = make([]supplyTxCallstack, 0, 1)
}
// internalTxsHandler handles internal transactions burned amount
-func (s *supply) internalTxsHandler(call *supplyTxCallstack) {
+func (s *supplyTracer) internalTxsHandler(call *supplyTxCallstack) {
// Handle Burned amount
if call.burn != nil {
s.delta.Burn.Misc.Add(s.delta.Burn.Misc, call.burn)
@@ -211,7 +225,7 @@ func (s *supply) internalTxsHandler(call *supplyTxCallstack) {
}
}
-func (s *supply) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+func (s *supplyTracer) onEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
call := supplyTxCallstack{
calls: make([]supplyTxCallstack, 0),
}
@@ -226,7 +240,7 @@ func (s *supply) OnEnter(depth int, typ byte, from common.Address, to common.Add
s.txCallstack = append(s.txCallstack, call)
}
-func (s *supply) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
+func (s *supplyTracer) onExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
if depth == 0 {
// No need to handle Burned amount if transaction is reverted
if !reverted {
@@ -252,13 +266,13 @@ func (s *supply) OnExit(depth int, output []byte, gasUsed uint64, err error, rev
s.txCallstack[size-1].calls = append(s.txCallstack[size-1].calls, call)
}
-func (s *supply) OnClose() {
+func (s *supplyTracer) onClose() {
if err := s.logger.Close(); err != nil {
log.Warn("failed to close supply tracer log file", "error", err)
}
}
-func (s *supply) write(data any) {
+func (s *supplyTracer) write(data any) {
supply, ok := data.(supplyInfo)
if !ok {
log.Warn("failed to cast supply tracer data on write to log file")
diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go
index b952c822863f..f918ce154b50 100644
--- a/eth/tracers/logger/logger.go
+++ b/eth/tracers/logger/logger.go
@@ -458,7 +458,7 @@ func formatLogs(logs []StructLog) []StructLogRes {
}
formatted[index].Stack = &stack
}
- if trace.ReturnData != nil && len(trace.ReturnData) > 0 {
+ if len(trace.ReturnData) > 0 {
formatted[index].ReturnData = hexutil.Bytes(trace.ReturnData).String()
}
if trace.Memory != nil {
diff --git a/eth/tracers/native/4byte.go b/eth/tracers/native/4byte.go
index 6cb0e433d27d..cec45a1e7a58 100644
--- a/eth/tracers/native/4byte.go
+++ b/eth/tracers/native/4byte.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
+ "github.com/ethereum/go-ethereum/params"
)
func init() {
@@ -48,17 +49,19 @@ func init() {
// 0xc281d19e-0: 1
// }
type fourByteTracer struct {
- ids map[string]int // ids aggregates the 4byte ids found
- interrupt atomic.Bool // Atomic flag to signal execution interruption
- reason error // Textual reason for the interruption
+ ids map[string]int // ids aggregates the 4byte ids found
+ interrupt atomic.Bool // Atomic flag to signal execution interruption
+ reason error // Textual reason for the interruption
+ chainConfig *params.ChainConfig
activePrecompiles []common.Address // Updated on tx start based on given rules
}
// newFourByteTracer returns a native go tracer which collects
// 4 byte-identifiers of a tx, and implements vm.EVMLogger.
-func newFourByteTracer(ctx *tracers.Context, _ json.RawMessage) (*tracers.Tracer, error) {
+func newFourByteTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
t := &fourByteTracer{
- ids: make(map[string]int),
+ ids: make(map[string]int),
+ chainConfig: chainConfig,
}
return &tracers.Tracer{
Hooks: &tracing.Hooks{
@@ -88,7 +91,7 @@ func (t *fourByteTracer) store(id []byte, size int) {
func (t *fourByteTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from common.Address) {
// Update list of precompiles based on current block
- rules := env.ChainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
+ rules := t.chainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
t.activePrecompiles = vm.ActivePrecompiles(rules)
}
diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go
index 1b94dd7b6771..c2247d1ce491 100644
--- a/eth/tracers/native/call.go
+++ b/eth/tracers/native/call.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
+ "github.com/ethereum/go-ethereum/params"
)
//go:generate go run github.com/fjl/gencodec -type callFrame -field-override callFrameMarshaling -out gen_callframe_json.go
@@ -125,7 +126,7 @@ type callTracerConfig struct {
// newCallTracer returns a native go tracer which tracks
// call frames of a tx, and implements vm.EVMLogger.
-func newCallTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
+func newCallTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
t, err := newCallTracerObject(ctx, cfg)
if err != nil {
return nil, err
@@ -145,10 +146,8 @@ func newCallTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer,
func newCallTracerObject(ctx *tracers.Context, cfg json.RawMessage) (*callTracer, error) {
var config callTracerConfig
- if cfg != nil {
- if err := json.Unmarshal(cfg, &config); err != nil {
- return nil, err
- }
+ if err := json.Unmarshal(cfg, &config); err != nil {
+ return nil, err
}
// First callframe contains tx context info
// and is populated on start and end.
diff --git a/eth/tracers/native/call_flat.go b/eth/tracers/native/call_flat.go
index a47b79f8df26..b7cc60b096bd 100644
--- a/eth/tracers/native/call_flat.go
+++ b/eth/tracers/native/call_flat.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
+ "github.com/ethereum/go-ethereum/params"
)
//go:generate go run github.com/fjl/gencodec -type flatCallAction -field-override flatCallActionMarshaling -out gen_flatcallaction_json.go
@@ -114,6 +115,7 @@ type flatCallResultMarshaling struct {
type flatCallTracer struct {
tracer *callTracer
config flatCallTracerConfig
+ chainConfig *params.ChainConfig
ctx *tracers.Context // Holds tracer context data
interrupt atomic.Bool // Atomic flag to signal execution interruption
activePrecompiles []common.Address // Updated on tx start based on given rules
@@ -125,22 +127,20 @@ type flatCallTracerConfig struct {
}
// newFlatCallTracer returns a new flatCallTracer.
-func newFlatCallTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
+func newFlatCallTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
var config flatCallTracerConfig
- if cfg != nil {
- if err := json.Unmarshal(cfg, &config); err != nil {
- return nil, err
- }
+ if err := json.Unmarshal(cfg, &config); err != nil {
+ return nil, err
}
// Create inner call tracer with default configuration, don't forward
// the OnlyTopCall or WithLog to inner for now
- t, err := newCallTracerObject(ctx, nil)
+ t, err := newCallTracerObject(ctx, json.RawMessage("{}"))
if err != nil {
return nil, err
}
- ft := &flatCallTracer{tracer: t, ctx: ctx, config: config}
+ ft := &flatCallTracer{tracer: t, ctx: ctx, config: config, chainConfig: chainConfig}
return &tracers.Tracer{
Hooks: &tracing.Hooks{
OnTxStart: ft.OnTxStart,
@@ -206,7 +206,7 @@ func (t *flatCallTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction
}
t.tracer.OnTxStart(env, tx, from)
// Update list of precompiles based on current block
- rules := env.ChainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
+ rules := t.chainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time)
t.activePrecompiles = vm.ActivePrecompiles(rules)
}
diff --git a/eth/tracers/native/call_flat_test.go b/eth/tracers/native/call_flat_test.go
index d5481b868bcc..a81af6d6bc19 100644
--- a/eth/tracers/native/call_flat_test.go
+++ b/eth/tracers/native/call_flat_test.go
@@ -31,7 +31,7 @@ import (
)
func TestCallFlatStop(t *testing.T) {
- tracer, err := tracers.DefaultDirectory.New("flatCallTracer", &tracers.Context{}, nil)
+ tracer, err := tracers.DefaultDirectory.New("flatCallTracer", &tracers.Context{}, nil, params.MainnetChainConfig)
require.NoError(t, err)
// this error should be returned by GetResult
@@ -47,9 +47,7 @@ func TestCallFlatStop(t *testing.T) {
Data: nil,
})
- tracer.OnTxStart(&tracing.VMContext{
- ChainConfig: params.MainnetChainConfig,
- }, tx, common.Address{})
+ tracer.OnTxStart(&tracing.VMContext{}, tx, common.Address{})
tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, nil, 0, big.NewInt(0))
diff --git a/eth/tracers/native/mux.go b/eth/tracers/native/mux.go
index c3b1d9f8cafa..77ab254568e6 100644
--- a/eth/tracers/native/mux.go
+++ b/eth/tracers/native/mux.go
@@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/tracers"
+ "github.com/ethereum/go-ethereum/params"
)
func init() {
@@ -38,17 +39,15 @@ type muxTracer struct {
}
// newMuxTracer returns a new mux tracer.
-func newMuxTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
+func newMuxTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
var config map[string]json.RawMessage
- if cfg != nil {
- if err := json.Unmarshal(cfg, &config); err != nil {
- return nil, err
- }
+ if err := json.Unmarshal(cfg, &config); err != nil {
+ return nil, err
}
objects := make([]*tracers.Tracer, 0, len(config))
names := make([]string, 0, len(config))
for k, v := range config {
- t, err := tracers.DefaultDirectory.New(k, ctx, v)
+ t, err := tracers.DefaultDirectory.New(k, ctx, v, chainConfig)
if err != nil {
return nil, err
}
diff --git a/eth/tracers/native/noop.go b/eth/tracers/native/noop.go
index f147134610c0..ac174cc25e7f 100644
--- a/eth/tracers/native/noop.go
+++ b/eth/tracers/native/noop.go
@@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/tracers"
+ "github.com/ethereum/go-ethereum/params"
)
func init() {
@@ -35,7 +36,7 @@ func init() {
type noopTracer struct{}
// newNoopTracer returns a new noop tracer.
-func newNoopTracer(ctx *tracers.Context, _ json.RawMessage) (*tracers.Tracer, error) {
+func newNoopTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
t := &noopTracer{}
return &tracers.Tracer{
Hooks: &tracing.Hooks{
diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go
index b353c0696067..978ba0670c92 100644
--- a/eth/tracers/native/prestate.go
+++ b/eth/tracers/native/prestate.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/eth/tracers/internal"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
)
//go:generate go run github.com/fjl/gencodec -type account -field-override accountMarshaling -out gen_account_json.go
@@ -74,12 +75,10 @@ type prestateTracerConfig struct {
DiffMode bool `json:"diffMode"` // If true, this tracer will return state modifications
}
-func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
+func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *params.ChainConfig) (*tracers.Tracer, error) {
var config prestateTracerConfig
- if cfg != nil {
- if err := json.Unmarshal(cfg, &config); err != nil {
- return nil, err
- }
+ if err := json.Unmarshal(cfg, &config); err != nil {
+ return nil, err
}
t := &prestateTracer{
pre: stateMap{},
diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go
index 7a3be797a3b7..e2ba9b8c7bc5 100644
--- a/ethdb/pebble/pebble.go
+++ b/ethdb/pebble/pebble.go
@@ -144,7 +144,7 @@ func (l panicLogger) Fatalf(format string, args ...interface{}) {
// New returns a wrapped pebble DB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
-func New(file string, cache int, handles int, namespace string, readonly bool, ephemeral bool) (*Database, error) {
+func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) {
// Ensure we have some minimal caching and file guarantees
if cache < minCache {
cache = minCache
@@ -185,7 +185,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e
fn: file,
log: logger,
quitChan: make(chan chan error),
- writeOptions: &pebble.WriteOptions{Sync: !ephemeral},
+ writeOptions: &pebble.WriteOptions{Sync: false},
}
opt := &pebble.Options{
// Pebble has a single combined cache area and the write
@@ -213,12 +213,12 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e
// options for the last level are used for all subsequent levels.
Levels: []pebble.LevelOptions{
{TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
- {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
- {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
- {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
- {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
- {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
- {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
+ {TargetFileSize: 4 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
+ {TargetFileSize: 8 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
+ {TargetFileSize: 16 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
+ {TargetFileSize: 32 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
+ {TargetFileSize: 64 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
+ {TargetFileSize: 128 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)},
},
ReadOnly: readonly,
EventListener: &pebble.EventListener{
diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go
index c845db1164f5..afed5332df38 100644
--- a/ethstats/ethstats.go
+++ b/ethstats/ethstats.go
@@ -219,7 +219,7 @@ func (s *Service) loop(chainHeadCh chan core.ChainHeadEvent, txEventCh chan core
// Start a goroutine that exhausts the subscriptions to avoid events piling up
var (
quitCh = make(chan struct{})
- headCh = make(chan *types.Block, 1)
+ headCh = make(chan *types.Header, 1)
txCh = make(chan struct{}, 1)
)
go func() {
@@ -231,7 +231,7 @@ func (s *Service) loop(chainHeadCh chan core.ChainHeadEvent, txEventCh chan core
// Notify of chain head events, but drop if too frequent
case head := <-chainHeadCh:
select {
- case headCh <- head.Block:
+ case headCh <- head.Header:
default:
}
@@ -602,9 +602,9 @@ func (s uncleStats) MarshalJSON() ([]byte, error) {
}
// reportBlock retrieves the current chain head and reports it to the stats server.
-func (s *Service) reportBlock(conn *connWrapper, block *types.Block) error {
+func (s *Service) reportBlock(conn *connWrapper, header *types.Header) error {
// Gather the block details from the header or block chain
- details := s.assembleBlockStats(block)
+ details := s.assembleBlockStats(header)
// Short circuit if the block detail is not available.
if details == nil {
@@ -625,10 +625,9 @@ func (s *Service) reportBlock(conn *connWrapper, block *types.Block) error {
// assembleBlockStats retrieves any required metadata to report a single block
// and assembles the block stats. If block is nil, the current head is processed.
-func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
+func (s *Service) assembleBlockStats(header *types.Header) *blockStats {
// Gather the block infos from the local blockchain
var (
- header *types.Header
td *big.Int
txs []txStats
uncles []*types.Header
@@ -638,16 +637,13 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
fullBackend, ok := s.backend.(fullNodeBackend)
if ok {
// Retrieve current chain head if no block is given.
- if block == nil {
- head := fullBackend.CurrentBlock()
- block, _ = fullBackend.BlockByNumber(context.Background(), rpc.BlockNumber(head.Number.Uint64()))
+ if header == nil {
+ header = fullBackend.CurrentBlock()
}
- // Short circuit if no block is available. It might happen when
- // the blockchain is reorging.
+ block, _ := fullBackend.BlockByNumber(context.Background(), rpc.BlockNumber(header.Number.Uint64()))
if block == nil {
return nil
}
- header = block.Header()
td = fullBackend.GetTd(context.Background(), header.Hash())
txs = make([]txStats, len(block.Transactions()))
@@ -657,15 +653,12 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
uncles = block.Uncles()
} else {
// Light nodes would need on-demand lookups for transactions/uncles, skip
- if block != nil {
- header = block.Header()
- } else {
+ if header == nil {
header = s.backend.CurrentHeader()
}
td = s.backend.GetTd(context.Background(), header.Hash())
txs = []txStats{}
}
-
// Assemble and return the block stats
author, _ := s.engine.Author(header)
@@ -708,19 +701,10 @@ func (s *Service) reportHistory(conn *connWrapper, list []uint64) error {
// Gather the batch of blocks to report
history := make([]*blockStats, len(indexes))
for i, number := range indexes {
- fullBackend, ok := s.backend.(fullNodeBackend)
// Retrieve the next block if it's known to us
- var block *types.Block
- if ok {
- block, _ = fullBackend.BlockByNumber(context.Background(), rpc.BlockNumber(number)) // TODO ignore error here ?
- } else {
- if header, _ := s.backend.HeaderByNumber(context.Background(), rpc.BlockNumber(number)); header != nil {
- block = types.NewBlockWithHeader(header)
- }
- }
- // If we do have the block, add to the history and continue
- if block != nil {
- history[len(history)-1-i] = s.assembleBlockStats(block)
+ header, _ := s.backend.HeaderByNumber(context.Background(), rpc.BlockNumber(number))
+ if header != nil {
+ history[len(history)-1-i] = s.assembleBlockStats(header)
continue
}
// Ran out of blocks, cut the report short and send
diff --git a/go.mod b/go.mod
index 4e5426039ce4..f422be7ed16e 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,6 @@ require (
github.com/aws/aws-sdk-go-v2/config v1.18.45
github.com/aws/aws-sdk-go-v2/credentials v1.13.43
github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2
- github.com/btcsuite/btcd/btcec/v2 v2.3.4
github.com/cespare/cp v0.1.0
github.com/cloudflare/cloudflare-go v0.79.0
github.com/cockroachdb/pebble v1.1.2
@@ -21,6 +20,7 @@ require (
github.com/crate-crypto/go-kzg-4844 v1.0.0
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set/v2 v2.6.0
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1
github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3
github.com/ethereum/c-kzg-4844 v1.0.0
@@ -109,7 +109,6 @@ require (
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
- github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/deepmap/oapi-codegen v1.6.0 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect
diff --git a/go.sum b/go.sum
index 233a93c40241..736af1736271 100644
--- a/go.sum
+++ b/go.sum
@@ -92,10 +92,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
-github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
-github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
-github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
-github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go
index b1f9d9bd7e70..a27f1f536dba 100644
--- a/internal/ethapi/api_test.go
+++ b/internal/ethapi/api_test.go
@@ -587,9 +587,6 @@ func (b testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscr
func (b testBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
panic("implement me")
}
-func (b testBackend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription {
- panic("implement me")
-}
func (b testBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
panic("implement me")
}
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index 0e991592b4b3..ccc11472b76b 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -71,7 +71,6 @@ type Backend interface {
GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
- SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription
// Transaction pool API
SendTx(ctx context.Context, signedTx *types.Transaction) error
diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go
index f9835a96dabf..08e3515f6b38 100644
--- a/internal/ethapi/transaction_args.go
+++ b/internal/ethapi/transaction_args.go
@@ -189,9 +189,7 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) erro
if args.BlobFeeCap != nil && args.BlobFeeCap.ToInt().Sign() == 0 {
return errors.New("maxFeePerBlobGas, if specified, must be non-zero")
}
- if err := args.setCancunFeeDefaults(ctx, head, b); err != nil {
- return err
- }
+ args.setCancunFeeDefaults(head)
// If both gasPrice and at least one of the EIP-1559 fee parameters are specified, error.
if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) {
return errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
@@ -243,7 +241,7 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) erro
}
// setCancunFeeDefaults fills in reasonable default fee values for unspecified fields.
-func (args *TransactionArgs) setCancunFeeDefaults(ctx context.Context, head *types.Header, b Backend) error {
+func (args *TransactionArgs) setCancunFeeDefaults(head *types.Header) {
// Set maxFeePerBlobGas if it is missing.
if args.BlobHashes != nil && args.BlobFeeCap == nil {
var excessBlobGas uint64
@@ -258,7 +256,6 @@ func (args *TransactionArgs) setCancunFeeDefaults(ctx context.Context, head *typ
val := new(big.Int).Mul(blobBaseFee, big.NewInt(2))
args.BlobFeeCap = (*hexutil.Big)(val)
}
- return nil
}
// setLondonFeeDefaults fills in reasonable default fee values for unspecified fields.
diff --git a/internal/ethapi/transaction_args_test.go b/internal/ethapi/transaction_args_test.go
index 531782817328..a3bf19b6863e 100644
--- a/internal/ethapi/transaction_args_test.go
+++ b/internal/ethapi/transaction_args_test.go
@@ -377,9 +377,6 @@ func (b *backendMock) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subsc
func (b *backendMock) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
return nil
}
-func (b *backendMock) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription {
- return nil
-}
func (b *backendMock) SendTx(ctx context.Context, signedTx *types.Transaction) error { return nil }
func (b *backendMock) GetTransaction(ctx context.Context, txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64, error) {
return false, nil, [32]byte{}, 0, 0, nil
diff --git a/internal/flags/flags_test.go b/internal/flags/flags_test.go
index cfe16b340e72..82e23fb4d2ff 100644
--- a/internal/flags/flags_test.go
+++ b/internal/flags/flags_test.go
@@ -17,15 +17,12 @@
package flags
import (
- "os"
"os/user"
"runtime"
"testing"
)
func TestPathExpansion(t *testing.T) {
- t.Parallel()
-
user, _ := user.Current()
var tests map[string]string
@@ -53,7 +50,7 @@ func TestPathExpansion(t *testing.T) {
}
}
- os.Setenv(`DDDXXX`, `/tmp`)
+ t.Setenv(`DDDXXX`, `/tmp`)
for test, expected := range tests {
t.Run(test, func(t *testing.T) {
t.Parallel()
diff --git a/metrics/json_test.go b/metrics/json_test.go
index f91fe8cfa54f..811bc29f11ec 100644
--- a/metrics/json_test.go
+++ b/metrics/json_test.go
@@ -13,7 +13,7 @@ func TestRegistryMarshallJSON(t *testing.T) {
r.Register("counter", NewCounter())
enc.Encode(r)
if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" {
- t.Fatalf(s)
+ t.Fatal(s)
}
}
diff --git a/miner/payload_building.go b/miner/payload_building.go
index ce4836d8a933..1260d839c9f8 100644
--- a/miner/payload_building.go
+++ b/miner/payload_building.go
@@ -69,26 +69,28 @@ func (args *BuildPayloadArgs) Id() engine.PayloadID {
// the revenue. Therefore, the empty-block here is always available and full-block
// will be set/updated afterwards.
type Payload struct {
- id engine.PayloadID
- empty *types.Block
- emptyWitness *stateless.Witness
- full *types.Block
- fullWitness *stateless.Witness
- sidecars []*types.BlobTxSidecar
- requests [][]byte
- fullFees *big.Int
- stop chan struct{}
- lock sync.Mutex
- cond *sync.Cond
+ id engine.PayloadID
+ empty *types.Block
+ emptyWitness *stateless.Witness
+ full *types.Block
+ fullWitness *stateless.Witness
+ sidecars []*types.BlobTxSidecar
+ emptyRequests [][]byte
+ requests [][]byte
+ fullFees *big.Int
+ stop chan struct{}
+ lock sync.Mutex
+ cond *sync.Cond
}
// newPayload initializes the payload object.
-func newPayload(empty *types.Block, witness *stateless.Witness, id engine.PayloadID) *Payload {
+func newPayload(empty *types.Block, emptyRequests [][]byte, witness *stateless.Witness, id engine.PayloadID) *Payload {
payload := &Payload{
- id: id,
- empty: empty,
- emptyWitness: witness,
- stop: make(chan struct{}),
+ id: id,
+ empty: empty,
+ emptyRequests: emptyRequests,
+ emptyWitness: witness,
+ stop: make(chan struct{}),
}
log.Info("Starting work on payload", "id", payload.id)
payload.cond = sync.NewCond(&payload.lock)
@@ -143,16 +145,14 @@ func (payload *Payload) Resolve() *engine.ExecutionPayloadEnvelope {
close(payload.stop)
}
if payload.full != nil {
- envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars)
- envelope.Requests = payload.requests
+ envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars, payload.emptyRequests)
if payload.fullWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.fullWitness) // cannot fail
}
return envelope
}
- envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil)
- envelope.Requests = payload.requests
+ envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil, payload.emptyRequests)
if payload.emptyWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.emptyWitness) // cannot fail
@@ -166,8 +166,7 @@ func (payload *Payload) ResolveEmpty() *engine.ExecutionPayloadEnvelope {
payload.lock.Lock()
defer payload.lock.Unlock()
- envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil)
- envelope.Requests = payload.requests
+ envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil, payload.emptyRequests)
if payload.emptyWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.emptyWitness) // cannot fail
@@ -198,8 +197,7 @@ func (payload *Payload) ResolveFull() *engine.ExecutionPayloadEnvelope {
default:
close(payload.stop)
}
- envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars)
- envelope.Requests = payload.requests
+ envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars, payload.requests)
if payload.fullWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.fullWitness) // cannot fail
@@ -227,7 +225,7 @@ func (miner *Miner) buildPayload(args *BuildPayloadArgs, witness bool) (*Payload
return nil, empty.err
}
// Construct a payload object for return.
- payload := newPayload(empty.block, empty.witness, args.Id())
+ payload := newPayload(empty.block, empty.requests, empty.witness, args.Id())
// Spin up a routine for updating the payload in background. This strategy
// can maximum the revenue for including transactions with highest fee.
diff --git a/node/api_test.go b/node/api_test.go
index 8761c4883ef8..4033c858710c 100644
--- a/node/api_test.go
+++ b/node/api_test.go
@@ -244,7 +244,6 @@ func TestStartRPC(t *testing.T) {
}
for _, test := range tests {
- test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
diff --git a/node/node_test.go b/node/node_test.go
index 82e814cadade..1552728d0479 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -513,7 +513,6 @@ func TestNodeRPCPrefix(t *testing.T) {
}
for _, test := range tests {
- test := test
name := fmt.Sprintf("http=%s ws=%s", test.httpPrefix, test.wsPrefix)
t.Run(name, func(t *testing.T) {
cfg := &Config{
diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go
index c6f598b7742e..eb0bbac93f05 100644
--- a/node/rpcstack_test.go
+++ b/node/rpcstack_test.go
@@ -522,7 +522,6 @@ func TestGzipHandler(t *testing.T) {
}
for _, test := range tests {
- test := test
t.Run(test.name, func(t *testing.T) {
srv := httptest.NewServer(newGzipHandler(test.handler))
defer srv.Close()
diff --git a/p2p/discover/v5wire/encoding_test.go b/p2p/discover/v5wire/encoding_test.go
index 8dd02620eba7..c66a0da9d3ef 100644
--- a/p2p/discover/v5wire/encoding_test.go
+++ b/p2p/discover/v5wire/encoding_test.go
@@ -395,7 +395,6 @@ func TestTestVectorsV5(t *testing.T) {
}
for _, test := range tests {
- test := test
t.Run(test.name, func(t *testing.T) {
net := newHandshakeTest()
defer net.close()
diff --git a/p2p/peer.go b/p2p/peer.go
index e4482deae9f7..c3834965cc59 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -412,7 +412,6 @@ outer:
func (p *Peer) startProtocols(writeStart <-chan struct{}, writeErr chan<- error) {
p.wg.Add(len(p.running))
for _, proto := range p.running {
- proto := proto
proto.closed = p.closed
proto.wstart = writeStart
proto.werr = writeErr
diff --git a/rlp/decode_test.go b/rlp/decode_test.go
index 07d9c579a6a4..8479a95b255a 100644
--- a/rlp/decode_test.go
+++ b/rlp/decode_test.go
@@ -307,7 +307,6 @@ func TestStreamReadBytes(t *testing.T) {
}
for _, test := range tests {
- test := test
name := fmt.Sprintf("input_%s/size_%d", test.input, test.size)
t.Run(name, func(t *testing.T) {
s := NewStream(bytes.NewReader(unhex(test.input)), 0)
diff --git a/rlp/rlpgen/gen_test.go b/rlp/rlpgen/gen_test.go
index 3b4f5df28765..b4fabb3dc633 100644
--- a/rlp/rlpgen/gen_test.go
+++ b/rlp/rlpgen/gen_test.go
@@ -51,7 +51,6 @@ var tests = []string{"uints", "nil", "rawvalue", "optional", "bigint", "uint256"
func TestOutput(t *testing.T) {
for _, test := range tests {
- test := test
t.Run(test, func(t *testing.T) {
inputFile := filepath.Join("testdata", test+".in.txt")
outputFile := filepath.Join("testdata", test+".out.txt")
diff --git a/rpc/client_test.go b/rpc/client_test.go
index b7607adfce9d..49f2350b404d 100644
--- a/rpc/client_test.go
+++ b/rpc/client_test.go
@@ -776,7 +776,6 @@ func TestClientHTTP(t *testing.T) {
wantResult = echoResult{"a", 1, new(echoArgs)}
)
for i := range results {
- i := i
go func() {
errc <- client.Call(&results[i], "test_echo", wantResult.String, wantResult.Int, wantResult.Args)
}()
diff --git a/rpc/types_test.go b/rpc/types_test.go
index 2fa74f9899bb..64833ffea68c 100644
--- a/rpc/types_test.go
+++ b/rpc/types_test.go
@@ -143,7 +143,6 @@ func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) {
{"finalized", int64(FinalizedBlockNumber)},
}
for _, test := range tests {
- test := test
t.Run(test.name, func(t *testing.T) {
bnh := BlockNumberOrHashWithNumber(BlockNumber(test.number))
marshalled, err := json.Marshal(bnh)
diff --git a/tests/fuzzers/secp256k1/secp_test.go b/tests/fuzzers/secp256k1/secp_test.go
index ca3039764b42..3345a66a67a8 100644
--- a/tests/fuzzers/secp256k1/secp_test.go
+++ b/tests/fuzzers/secp256k1/secp_test.go
@@ -20,7 +20,7 @@ import (
"fmt"
"testing"
- "github.com/btcsuite/btcd/btcec/v2"
+ dcred_secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
)
@@ -38,7 +38,7 @@ func Fuzz(f *testing.F) {
func fuzz(dataP1, dataP2 []byte) {
var (
curveA = secp256k1.S256()
- curveB = btcec.S256()
+ curveB = dcred_secp256k1.S256()
)
// first point
x1, y1 := curveB.ScalarBaseMult(dataP1)
diff --git a/tests/state_test.go b/tests/state_test.go
index 76fec97de0ee..76d5a601c76a 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -104,7 +104,6 @@ func TestExecutionSpecState(t *testing.T) {
func execStateTest(t *testing.T, st *testMatcher, test *StateTest) {
for _, subtest := range test.Subtests() {
- subtest := subtest
key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
// If -short flag is used, we don't execute all four permutations, only
@@ -244,14 +243,12 @@ func runBenchmarkFile(b *testing.B, path string) {
return
}
for _, t := range m {
- t := t
runBenchmark(b, &t)
}
}
func runBenchmark(b *testing.B, t *StateTest) {
for _, subtest := range t.Subtests() {
- subtest := subtest
key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
b.Run(key, func(b *testing.B) {
diff --git a/trie/committer.go b/trie/committer.go
index 863e7bafdc4b..6c4374ccfdd5 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -18,6 +18,7 @@ package trie
import (
"fmt"
+ "sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/trie/trienode"
@@ -42,12 +43,12 @@ func newCommitter(nodeset *trienode.NodeSet, tracer *tracer, collectLeaf bool) *
}
// Commit collapses a node down into a hash node.
-func (c *committer) Commit(n node) hashNode {
- return c.commit(nil, n).(hashNode)
+func (c *committer) Commit(n node, parallel bool) hashNode {
+ return c.commit(nil, n, parallel).(hashNode)
}
// commit collapses a node down into a hash node and returns it.
-func (c *committer) commit(path []byte, n node) node {
+func (c *committer) commit(path []byte, n node, parallel bool) node {
// if this path is clean, use available cached data
hash, dirty := n.cache()
if hash != nil && !dirty {
@@ -62,7 +63,7 @@ func (c *committer) commit(path []byte, n node) node {
// If the child is fullNode, recursively commit,
// otherwise it can only be hashNode or valueNode.
if _, ok := cn.Val.(*fullNode); ok {
- collapsed.Val = c.commit(append(path, cn.Key...), cn.Val)
+ collapsed.Val = c.commit(append(path, cn.Key...), cn.Val, false)
}
// The key needs to be copied, since we're adding it to the
// modified nodeset.
@@ -73,7 +74,7 @@ func (c *committer) commit(path []byte, n node) node {
}
return collapsed
case *fullNode:
- hashedKids := c.commitChildren(path, cn)
+ hashedKids := c.commitChildren(path, cn, parallel)
collapsed := cn.copy()
collapsed.Children = hashedKids
@@ -91,8 +92,12 @@ func (c *committer) commit(path []byte, n node) node {
}
// commitChildren commits the children of the given fullnode
-func (c *committer) commitChildren(path []byte, n *fullNode) [17]node {
- var children [17]node
+func (c *committer) commitChildren(path []byte, n *fullNode, parallel bool) [17]node {
+ var (
+ wg sync.WaitGroup
+ nodesMu sync.Mutex
+ children [17]node
+ )
for i := 0; i < 16; i++ {
child := n.Children[i]
if child == nil {
@@ -108,7 +113,24 @@ func (c *committer) commitChildren(path []byte, n *fullNode) [17]node {
// Commit the child recursively and store the "hashed" value.
// Note the returned node can be some embedded nodes, so it's
// possible the type is not hashNode.
- children[i] = c.commit(append(path, byte(i)), child)
+ if !parallel {
+ children[i] = c.commit(append(path, byte(i)), child, false)
+ } else {
+ wg.Add(1)
+ go func(index int) {
+ p := append(path, byte(index))
+ childSet := trienode.NewNodeSet(c.nodes.Owner)
+ childCommitter := newCommitter(childSet, c.tracer, c.collectLeaf)
+ children[index] = childCommitter.commit(p, child, false)
+ nodesMu.Lock()
+ c.nodes.MergeSet(childSet)
+ nodesMu.Unlock()
+ wg.Done()
+ }(i)
+ }
+ }
+ if parallel {
+ wg.Wait()
}
// For the 17th child, it's possible the type is valuenode.
if n.Children[16] != nil {
diff --git a/trie/trie.go b/trie/trie.go
index 885b6b79628c..372684683c90 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -49,6 +49,9 @@ type Trie struct {
// actually unhashed nodes.
unhashed int
+ // uncommitted is the number of updates since last commit.
+ uncommitted int
+
// reader is the handler trie can retrieve nodes from.
reader *trieReader
@@ -64,12 +67,13 @@ func (t *Trie) newFlag() nodeFlag {
// Copy returns a copy of Trie.
func (t *Trie) Copy() *Trie {
return &Trie{
- root: t.root,
- owner: t.owner,
- committed: t.committed,
- unhashed: t.unhashed,
- reader: t.reader,
- tracer: t.tracer.copy(),
+ root: t.root,
+ owner: t.owner,
+ committed: t.committed,
+ reader: t.reader,
+ tracer: t.tracer.copy(),
+ uncommitted: t.uncommitted,
+ unhashed: t.unhashed,
}
}
@@ -309,6 +313,7 @@ func (t *Trie) Update(key, value []byte) error {
func (t *Trie) update(key, value []byte) error {
t.unhashed++
+ t.uncommitted++
k := keybytesToHex(key)
if len(value) != 0 {
_, n, err := t.insert(t.root, nil, k, valueNode(value))
@@ -422,6 +427,7 @@ func (t *Trie) Delete(key []byte) error {
if t.committed {
return ErrCommitted
}
+ t.uncommitted++
t.unhashed++
k := keybytesToHex(key)
_, n, err := t.delete(t.root, nil, k)
@@ -642,7 +648,9 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
for _, path := range t.tracer.deletedNodes() {
nodes.AddNode([]byte(path), trienode.NewDeleted())
}
- t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root)
+ // If the number of changes is 100 or fewer, we let one thread handle it
+ t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root, t.uncommitted > 100)
+ t.uncommitted = 0
return rootHash, nodes
}
@@ -678,6 +686,7 @@ func (t *Trie) Reset() {
t.root = nil
t.owner = common.Hash{}
t.unhashed = 0
+ t.uncommitted = 0
t.tracer.reset()
t.committed = false
}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 505b517bc593..9b2530bdd48d 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -26,6 +26,7 @@ import (
"math/rand"
"reflect"
"sort"
+ "strings"
"testing"
"testing/quick"
@@ -35,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/internal/testrand"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/holiman/uint256"
@@ -1206,3 +1208,105 @@ func FuzzTrie(f *testing.F) {
}
})
}
+
+func BenchmarkCommit(b *testing.B) {
+ benchmarkCommit(b, 100)
+ benchmarkCommit(b, 500)
+ benchmarkCommit(b, 2000)
+ benchmarkCommit(b, 5000)
+}
+
+func benchmarkCommit(b *testing.B, n int) {
+ b.Run(fmt.Sprintf("commit-%vnodes-sequential", n), func(b *testing.B) {
+ testCommit(b, n, false)
+ })
+ b.Run(fmt.Sprintf("commit-%vnodes-parallel", n), func(b *testing.B) {
+ testCommit(b, n, true)
+ })
+}
+
+func testCommit(b *testing.B, n int, parallel bool) {
+ tries := make([]*Trie, b.N)
+ for i := 0; i < b.N; i++ {
+ tries[i] = NewEmpty(nil)
+ for j := 0; j < n; j++ {
+ key := testrand.Bytes(32)
+ val := testrand.Bytes(32)
+ tries[i].Update(key, val)
+ }
+ tries[i].Hash()
+ if !parallel {
+ tries[i].uncommitted = 0
+ }
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < len(tries); i++ {
+ tries[i].Commit(true)
+ }
+}
+
+func TestCommitCorrect(t *testing.T) {
+ var paraTrie = NewEmpty(nil)
+ var refTrie = NewEmpty(nil)
+
+ for j := 0; j < 5000; j++ {
+ key := testrand.Bytes(32)
+ val := testrand.Bytes(32)
+ paraTrie.Update(key, val)
+ refTrie.Update(common.CopyBytes(key), common.CopyBytes(val))
+ }
+ paraTrie.Hash()
+ refTrie.Hash()
+ refTrie.uncommitted = 0
+
+ haveRoot, haveNodes := paraTrie.Commit(true)
+ wantRoot, wantNodes := refTrie.Commit(true)
+
+ if haveRoot != wantRoot {
+ t.Fatalf("have %x want %x", haveRoot, wantRoot)
+ }
+ have := printSet(haveNodes)
+ want := printSet(wantNodes)
+ if have != want {
+ i := 0
+ for i = 0; i < len(have); i++ {
+ if have[i] != want[i] {
+ break
+ }
+ }
+ if i > 100 {
+ i -= 100
+ }
+ t.Fatalf("have != want\nhave %q\nwant %q", have[i:], want[i:])
+ }
+}
+func printSet(set *trienode.NodeSet) string {
+ var out = new(strings.Builder)
+ fmt.Fprintf(out, "nodeset owner: %v\n", set.Owner)
+ var paths []string
+ for k := range set.Nodes {
+ paths = append(paths, k)
+ }
+ sort.Strings(paths)
+
+ for _, path := range paths {
+ n := set.Nodes[path]
+ // Deletion
+ if n.IsDeleted() {
+ fmt.Fprintf(out, " [-]: %x\n", path)
+ continue
+ }
+ // Insertion or update
+ fmt.Fprintf(out, " [+/*]: %x -> %v \n", path, n.Hash)
+ }
+ sort.Slice(set.Leaves, func(i, j int) bool {
+ a := set.Leaves[i]
+ b := set.Leaves[j]
+ return bytes.Compare(a.Parent[:], b.Parent[:]) < 0
+ })
+ for _, n := range set.Leaves {
+ fmt.Fprintf(out, "[leaf]: %v\n", n)
+ }
+ return out.String()
+}
diff --git a/trie/trienode/node.go b/trie/trienode/node.go
index 09f355f3b590..7debe6ecbc4c 100644
--- a/trie/trienode/node.go
+++ b/trie/trienode/node.go
@@ -18,6 +18,7 @@ package trienode
import (
"fmt"
+ "maps"
"sort"
"strings"
@@ -99,6 +100,23 @@ func (set *NodeSet) AddNode(path []byte, n *Node) {
set.Nodes[string(path)] = n
}
+// MergeSet merges this 'set' with 'other'. It assumes that the sets are disjoint,
+// and thus does not deduplicate data (count deletes, dedup leaves etc).
+func (set *NodeSet) MergeSet(other *NodeSet) error {
+ if set.Owner != other.Owner {
+ return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, other.Owner)
+ }
+ maps.Copy(set.Nodes, other.Nodes)
+
+ set.deletes += other.deletes
+ set.updates += other.updates
+
+ // Since we assume the sets are disjoint, we can safely append leaves
+ // like this without deduplication.
+ set.Leaves = append(set.Leaves, other.Leaves...)
+ return nil
+}
+
// Merge adds a set of nodes into the set.
func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error {
if set.Owner != owner {