From 1593828d868ff45aaebaf31d11d6d861383344c9 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Thu, 13 Jul 2023 17:49:16 -0500 Subject: [PATCH 01/46] First attempt --- miner/algo_common.go | 243 ++++++++++++++++++++++++++++++++--- miner/algo_common_test.go | 2 +- miner/algo_greedy_buckets.go | 3 +- 3 files changed, 229 insertions(+), 19 deletions(-) diff --git a/miner/algo_common.go b/miner/algo_common.go index ee036e5e1d..289141b48f 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -4,6 +4,7 @@ import ( "crypto/ecdsa" "errors" "fmt" + "github.com/ethereum/go-ethereum/crypto" "math/big" "sync/atomic" @@ -102,30 +103,30 @@ func newEnvironmentDiff(env *environment) *environmentDiff { } } -func (e *environmentDiff) copy() *environmentDiff { - gasPool := new(core.GasPool).AddGas(e.gasPool.Gas()) +func (envDiff *environmentDiff) copy() *environmentDiff { + gasPool := new(core.GasPool).AddGas(envDiff.gasPool.Gas()) return &environmentDiff{ - baseEnvironment: e.baseEnvironment.copy(), - header: types.CopyHeader(e.header), + baseEnvironment: envDiff.baseEnvironment.copy(), + header: types.CopyHeader(envDiff.header), gasPool: gasPool, - state: e.state.Copy(), - newProfit: new(big.Int).Set(e.newProfit), - newTxs: e.newTxs[:], - newReceipts: e.newReceipts[:], + state: envDiff.state.Copy(), + newProfit: new(big.Int).Set(envDiff.newProfit), + newTxs: envDiff.newTxs[:], + newReceipts: envDiff.newReceipts[:], } } -func (e *environmentDiff) applyToBaseEnv() { - env := e.baseEnvironment - env.gasPool = new(core.GasPool).AddGas(e.gasPool.Gas()) - env.header = e.header +func (envDiff *environmentDiff) applyToBaseEnv() { + env := envDiff.baseEnvironment + env.gasPool = new(core.GasPool).AddGas(envDiff.gasPool.Gas()) + env.header = envDiff.header env.state.StopPrefetcher() - env.state = e.state - env.profit.Add(env.profit, e.newProfit) - env.tcount += len(e.newTxs) - env.txs = append(env.txs, e.newTxs...) - env.receipts = append(env.receipts, e.newReceipts...) + env.state = envDiff.state + env.profit.Add(env.profit, envDiff.newProfit) + env.tcount += len(envDiff.newTxs) + env.txs = append(env.txs, envDiff.newTxs...) + env.receipts = append(env.receipts, envDiff.newReceipts...) 
} func checkInterrupt(i *int32) bool { @@ -248,6 +249,214 @@ func (envDiff *environmentDiff) commitTx(tx *types.Transaction, chData chainData return receipt, shiftTx, nil } +func (envDiff *environmentDiff) _bundle(bundle *types.SimulatedBundle, chData chainData, interrupt *int32, algoConf algorithmConfig) error { + // we don't want to finalize until all the transactions in the bundle have been successfully applied, otherwise snapshot fails + var ( + coinbase = envDiff.baseEnvironment.coinbase + + //coinbaseBalanceBefore = envDiff.state.GetBalance(coinbase) + + //profitBefore = new(big.Int).Set(envDiff.newProfit) + hasBaseFee = envDiff.header.BaseFee != nil + + //gasUsed uint64 + bundleErr error + + TryApply = func( + envDiff *environmentDiff, chData chainData, header *types.Header, + baseFee *big.Int, tx *types.Transaction, + vmConf vm.Config, gp *core.GasPool, + ) (*types.Receipt, uint64, uint64, error) { // receipt, cumulativeGas, gasPool, error + envDiff.state.SetTxContext(tx.Hash(), envDiff.baseEnvironment.tcount+len(envDiff.newTxs)) + + blacklist := chData.blacklist + if len(blacklist) == 0 { + // TODO: apply transaction without blacklist + } + + sender, err := types.Sender(envDiff.baseEnvironment.signer, tx) + if err != nil { + return nil, 0, 0, err + } + + if _, in := blacklist[sender]; in { + return nil, 0, 0, errors.New("blacklist violation, tx.sender") + } + + if to := tx.To(); to != nil { + if _, in := blacklist[*to]; in { + return nil, 0, 0, errors.New("blacklist violation, tx.to") + } + } + + // we set precompile to nil, but they are set in the validation code + // there will be no difference in the result if precompile is not it the blocklist + touchTracer := logger.NewAccessListTracer(nil, common.Address{}, common.Address{}, nil) + vmConf.Tracer = touchTracer + vmConf.Debug = true + + var ( + hook = func() error { + for _, accessTuple := range touchTracer.AccessList() { + if _, in := blacklist[accessTuple.Address]; in { + return errors.New("blacklist violation, tx trace") + } + } + return nil + } + + used = header.GasUsed + gasPool = new(core.GasPool).AddGas(gp.Gas()) + + bc = chData.chain + author = coinbase + statedb = envDiff.state + chainConfig = chData.chainConfig + + //snap = envDiff.state.Snapshot() + ) + msg, err := core.TransactionToMessage(tx, types.MakeSigner(chData.chainConfig, header.Number), baseFee) + if err != nil { + return nil, 0, 0, err + } + // Create a new context to be used in the EVM environment + blockContext := core.NewEVMBlockContext(header, bc, &author) + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, chainConfig, vmConf) + + txContext := core.NewEVMTxContext(msg) + vmenv.Reset(txContext, statedb) + result, err := core.ApplyMessage(vmenv, msg, gasPool) + if err != nil { + //statedb.RevertToSnapshot(snap) + return nil, 0, 0, err + } + + err = hook() + if err != nil { + //statedb.RevertToSnapshot(snap) + return nil, 0, 0, err + } + + used += result.UsedGas + + // Create a new receipt for the transaction, storing the intermediate root and gas used + // by the tx. + receipt := &types.Receipt{Type: tx.Type(), PostState: make([]byte, 0), CumulativeGasUsed: used} + if result.Failed() { + receipt.Status = types.ReceiptStatusFailed + } else { + receipt.Status = types.ReceiptStatusSuccessful + } + receipt.TxHash = tx.Hash() + receipt.GasUsed = result.UsedGas + + // If the transaction created a contract, store the creation address in the receipt. 
+ if msg.To == nil { + receipt.ContractAddress = crypto.CreateAddress(vmenv.TxContext.Origin, tx.Nonce()) + } + + // Set the receipt logs and create the bloom filter. + receipt.Logs = statedb.GetLogs(tx.Hash(), header.Number.Uint64(), header.Hash()) + receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) + receipt.BlockHash = header.Hash() + receipt.BlockNumber = header.Number + receipt.TransactionIndex = uint(statedb.TxIndex()) + return receipt, used, gasPool.Gas(), nil + } + ) + + var ( + gp = new(core.GasPool).AddGas(envDiff.gasPool.Gas()) + //cumulativeGas uint64 + txs = make([]*types.Transaction, 0, len(bundle.OriginalBundle.Txs)) + receipts = make([]*types.Receipt, 0, len(bundle.OriginalBundle.Txs)) + gasUsed uint64 + profitTally = new(big.Int) + + snap = envDiff.state.Snapshot() + + header = types.CopyHeader(envDiff.header) + ) + for _, tx := range bundle.OriginalBundle.Txs { + if hasBaseFee && tx.Type() == types.DynamicFeeTxType { + // Sanity check for extremely large numbers + if tx.GasFeeCap().BitLen() > 256 { + bundleErr = core.ErrFeeCapVeryHigh + break + } + if tx.GasTipCap().BitLen() > 256 { + bundleErr = core.ErrTipVeryHigh + break + } + + // Ensure gasFeeCap is greater than or equal to gasTipCap. + if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + bundleErr = core.ErrTipAboveFeeCap + break + } + } + + if tx.Value().Sign() == -1 { + bundleErr = core.ErrNegativeValue + break + } + + gasPrice, err := tx.EffectiveGasTip(envDiff.header.BaseFee) + if err != nil { + bundleErr = err + break + } + + _, err = types.Sender(envDiff.baseEnvironment.signer, tx) + if err != nil { + bundleErr = err + break + } + + if checkInterrupt(interrupt) { + bundleErr = errInterrupt + break + } + + // Try committing the transaction but without finalisation + receipt, cumulative, gasPool, err := TryApply(envDiff, chData, header, + header.BaseFee, tx, *chData.chain.GetVMConfig(), envDiff.gasPool) + if err != nil { + bundleErr = err + break + } + + if receipt.Status != types.ReceiptStatusSuccessful && !bundle.OriginalBundle.RevertingHash(tx.Hash()) { + bundleErr = errors.New("bundle tx revert") + break + } + + // Update the gas pool and the cumulative gas used + gp.SetGas(gasPool) + profitTally.Add(profitTally, new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(receipt.GasUsed))) + header.GasUsed = cumulative + //cumulativeGas = cumulative + txs = append(txs, tx) + receipts = append(receipts, receipt) + gasUsed += receipt.GasUsed + } + + if bundleErr != nil { + envDiff.state.RevertToSnapshot(snap) + return bundleErr + } + // skip profit calculation for now cause that gets complicated + // TODO: actually if we tally up the profit diffs and egp diffs we can calculate the profit without finalizing state + envDiff.state.Finalise(true) + envDiff.gasPool.SetGas(gp.Gas()) + envDiff.newTxs = append(envDiff.newTxs, txs...) + envDiff.newReceipts = append(envDiff.newReceipts, receipts...) 
+ envDiff.newProfit = new(big.Int).Add(envDiff.newProfit, profitTally) + //envDiff.header.GasUsed += cumulativeGas + envDiff.header.GasUsed = header.GasUsed + return nil +} + // Commit Bundle to env diff func (envDiff *environmentDiff) commitBundle(bundle *types.SimulatedBundle, chData chainData, interrupt *int32, algoConf algorithmConfig) error { coinbase := envDiff.baseEnvironment.coinbase diff --git a/miner/algo_common_test.go b/miner/algo_common_test.go index 676d934135..c85ec02154 100644 --- a/miner/algo_common_test.go +++ b/miner/algo_common_test.go @@ -543,7 +543,7 @@ func TestGetSealingWorkAlgosWithProfit(t *testing.T) { testConfig.BuilderTxSigningKey, err = crypto.GenerateKey() require.NoError(t, err) testConfig.AlgoType = algoType - t.Logf("running for %d", algoType) + t.Logf("running for %s", algoType.String()) testBundles(t) } } diff --git a/miner/algo_greedy_buckets.go b/miner/algo_greedy_buckets.go index c1d8f48689..cd3b4b54ff 100644 --- a/miner/algo_greedy_buckets.go +++ b/miner/algo_greedy_buckets.go @@ -119,7 +119,8 @@ func (b *greedyBucketsBuilder) commit(envDiff *environmentDiff, log.Trace("Included tx", "EGP", effGapPrice.String(), "gasUsed", receipt.GasUsed) } } else if bundle := order.Bundle(); bundle != nil { - err := envDiff.commitBundle(bundle, b.chainData, b.interrupt, algoConf) + //err := envDiff.commitBundle(bundle, b.chainData, b.interrupt, algoConf) + err := envDiff._bundle(bundle, b.chainData, b.interrupt, algoConf) if err != nil { log.Trace("Could not apply bundle", "bundle", bundle.OriginalBundle.Hash, "err", err) From 39e25e5a555389d50733539c8bb2b00028617ccb Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Thu, 13 Jul 2023 17:55:12 -0500 Subject: [PATCH 02/46] Try granular snaps --- miner/algo_common.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/miner/algo_common.go b/miner/algo_common.go index 289141b48f..8efc7c819f 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -313,7 +313,7 @@ func (envDiff *environmentDiff) _bundle(bundle *types.SimulatedBundle, chData ch statedb = envDiff.state chainConfig = chData.chainConfig - //snap = envDiff.state.Snapshot() + snap = envDiff.state.Snapshot() ) msg, err := core.TransactionToMessage(tx, types.MakeSigner(chData.chainConfig, header.Number), baseFee) if err != nil { @@ -327,13 +327,13 @@ func (envDiff *environmentDiff) _bundle(bundle *types.SimulatedBundle, chData ch vmenv.Reset(txContext, statedb) result, err := core.ApplyMessage(vmenv, msg, gasPool) if err != nil { - //statedb.RevertToSnapshot(snap) + statedb.RevertToSnapshot(snap) return nil, 0, 0, err } err = hook() if err != nil { - //statedb.RevertToSnapshot(snap) + statedb.RevertToSnapshot(snap) return nil, 0, 0, err } @@ -365,6 +365,7 @@ func (envDiff *environmentDiff) _bundle(bundle *types.SimulatedBundle, chData ch } ) + envDiff.state.Finalise(true) var ( gp = new(core.GasPool).AddGas(envDiff.gasPool.Gas()) //cumulativeGas uint64 From d323b21bda93e758951d591ab24d9a5402cb7ddd Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 19 Jul 2023 01:11:30 -0500 Subject: [PATCH 03/46] Try appending to access list and reverting on bundle failure --- core/state/access_list.go | 46 ++++++++++++++++++++++++++++++++++----- core/state/statedb.go | 15 +++++++++++++ miner/algo_common.go | 20 +++++++++++++++-- 3 files changed, 74 insertions(+), 7 deletions(-) diff --git a/core/state/access_list.go b/core/state/access_list.go index 4194691345..e627aa37c1 100644 --- a/core/state/access_list.go +++ b/core/state/access_list.go @@ 
-17,6 +17,7 @@ package state import ( + "fmt" "github.com/ethereum/go-ethereum/common" ) @@ -25,6 +26,12 @@ type accessList struct { slots []map[common.Hash]struct{} } +type AccessLists []*accessList + +func (als AccessLists) Append(al *accessList) AccessLists { + return append(als, al) +} + // ContainsAddress returns true if the address is in the access list. func (al *accessList) ContainsAddress(address common.Address) bool { _, ok := al.addresses[address] @@ -55,13 +62,13 @@ func newAccessList() *accessList { } // Copy creates an independent copy of an accessList. -func (a *accessList) Copy() *accessList { +func (al *accessList) Copy() *accessList { cp := newAccessList() - for k, v := range a.addresses { + for k, v := range al.addresses { cp.addresses[k] = v } - cp.slots = make([]map[common.Hash]struct{}, len(a.slots)) - for i, slotMap := range a.slots { + cp.slots = make([]map[common.Hash]struct{}, len(al.slots)) + for i, slotMap := range al.slots { newSlotmap := make(map[common.Hash]struct{}, len(slotMap)) for k := range slotMap { newSlotmap[k] = struct{}{} @@ -71,6 +78,34 @@ func (a *accessList) Copy() *accessList { return cp } +func (al *accessList) Append(other *accessList) *accessList { + for k, v := range other.addresses { + if _, exists := al.addresses[k]; !exists { + al.addresses[k] = v + } + } + + for i, slotMap := range other.slots { + var newSlotMap map[common.Hash]struct{} + if i >= len(al.slots) { + newSlotMap = make(map[common.Hash]struct{}, len(slotMap)) + } else { + newSlotMap = al.slots[i] + } + + for k := range slotMap { + newSlotMap[k] = struct{}{} + } + + if i >= len(al.slots) { + al.slots = append(al.slots, newSlotMap) + } else { + al.slots[i] = newSlotMap + } + } + return al +} + // AddAddress adds an address to the access list, and returns 'true' if the operation // caused a change (addr was not previously in the list). func (al *accessList) AddAddress(address common.Address) bool { @@ -114,7 +149,8 @@ func (al *accessList) DeleteSlot(address common.Address, slot common.Hash) { idx, addrOk := al.addresses[address] // There are two ways this can fail if !addrOk { - panic("reverting slot change, address not present in list") + panic(fmt.Sprintf("reverting slot change, address %x not present in list", address)) + //panic("reverting slot change, address not present in list") } slotmap := al.slots[idx] delete(slotmap, slot) diff --git a/core/state/statedb.go b/core/state/statedb.go index 256bd3e95f..06c2e301df 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -833,6 +833,17 @@ func (s *StateDB) RevertToSnapshot(revid int) { s.validRevisions = s.validRevisions[:idx] } +func (s *StateDB) RevertToSnapshotWithAccessList(revid int, accessList *accessList) { + prev := s.accessList + s.accessList = accessList + s.RevertToSnapshot(revid) + s.accessList = prev +} + +func (s *StateDB) SetAccessList(accessList *accessList) { + s.accessList = accessList +} + // GetRefund returns the current value of the refund counter. func (s *StateDB) GetRefund() uint64 { return s.refund @@ -1181,3 +1192,7 @@ func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common. 
} return ret } + +func (s *StateDB) AccessList() *accessList { + return s.accessList +} diff --git a/miner/algo_common.go b/miner/algo_common.go index 8efc7c819f..26c0cb6c51 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -374,10 +374,15 @@ func (envDiff *environmentDiff) _bundle(bundle *types.SimulatedBundle, chData ch gasUsed uint64 profitTally = new(big.Int) - snap = envDiff.state.Snapshot() + originalAccessList = envDiff.state.AccessList().Copy() + snap = envDiff.state.Snapshot() + accessLists = make(state.AccessLists, 0, len(bundle.OriginalBundle.Txs)) + revisions = make([]int, 0, len(bundle.OriginalBundle.Txs)) header = types.CopyHeader(envDiff.header) ) + accessLists = accessLists.Append(originalAccessList) + revisions = append(revisions, snap) for _, tx := range bundle.OriginalBundle.Txs { if hasBaseFee && tx.Type() == types.DynamicFeeTxType { // Sanity check for extremely large numbers @@ -427,6 +432,9 @@ func (envDiff *environmentDiff) _bundle(bundle *types.SimulatedBundle, chData ch break } + accessLists = accessLists.Append(envDiff.state.AccessList().Copy()) + revisions = append(revisions, envDiff.state.Snapshot()) + if receipt.Status != types.ReceiptStatusSuccessful && !bundle.OriginalBundle.RevertingHash(tx.Hash()) { bundleErr = errors.New("bundle tx revert") break @@ -443,7 +451,15 @@ func (envDiff *environmentDiff) _bundle(bundle *types.SimulatedBundle, chData ch } if bundleErr != nil { - envDiff.state.RevertToSnapshot(snap) + for i := len(accessLists) - 1; i > 0; i-- { + envDiff.state.RevertToSnapshotWithAccessList(revisions[i], accessLists[i]) + envDiff.state.SetAccessList(accessLists[i]) + } + envDiff.state.RevertToSnapshot(revisions[0]) + envDiff.state.SetAccessList(originalAccessList) + //envDiff.state.RevertToSnapshotWithAccessList(snap, tmpAccessList.Append(envDiff.state.AccessList())) + //envDiff.state.SetAccessList(originalAccessList) + //envDiff.state.RevertToSnapshot(snap) return bundleErr } // skip profit calculation for now cause that gets complicated From d51683a8c4d7e9c75cbb5714708472e9ab896530 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 19 Jul 2023 03:01:20 -0500 Subject: [PATCH 04/46] Revert to beginning using extra access list when available --- core/state/access_list.go | 28 ---------------------------- miner/algo_common.go | 6 +++++- 2 files changed, 5 insertions(+), 29 deletions(-) diff --git a/core/state/access_list.go b/core/state/access_list.go index e627aa37c1..cb4410ecdf 100644 --- a/core/state/access_list.go +++ b/core/state/access_list.go @@ -78,34 +78,6 @@ func (al *accessList) Copy() *accessList { return cp } -func (al *accessList) Append(other *accessList) *accessList { - for k, v := range other.addresses { - if _, exists := al.addresses[k]; !exists { - al.addresses[k] = v - } - } - - for i, slotMap := range other.slots { - var newSlotMap map[common.Hash]struct{} - if i >= len(al.slots) { - newSlotMap = make(map[common.Hash]struct{}, len(slotMap)) - } else { - newSlotMap = al.slots[i] - } - - for k := range slotMap { - newSlotMap[k] = struct{}{} - } - - if i >= len(al.slots) { - al.slots = append(al.slots, newSlotMap) - } else { - al.slots[i] = newSlotMap - } - } - return al -} - // AddAddress adds an address to the access list, and returns 'true' if the operation // caused a change (addr was not previously in the list). 
func (al *accessList) AddAddress(address common.Address) bool { diff --git a/miner/algo_common.go b/miner/algo_common.go index 26c0cb6c51..7d987f23e2 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -455,7 +455,11 @@ func (envDiff *environmentDiff) _bundle(bundle *types.SimulatedBundle, chData ch envDiff.state.RevertToSnapshotWithAccessList(revisions[i], accessLists[i]) envDiff.state.SetAccessList(accessLists[i]) } - envDiff.state.RevertToSnapshot(revisions[0]) + + if len(accessLists) > 1 { + envDiff.state.RevertToSnapshotWithAccessList(revisions[0], accessLists[1]) + } + //envDiff.state.RevertToSnapshot(revisions[0]) envDiff.state.SetAccessList(originalAccessList) //envDiff.state.RevertToSnapshotWithAccessList(snap, tmpAccessList.Append(envDiff.state.AccessList())) //envDiff.state.SetAccessList(originalAccessList) From 2445edb955d09dd4b97bbbc081b5a6be08ca97ba Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 19 Jul 2023 03:08:30 -0500 Subject: [PATCH 05/46] Add access list and revision after apply to avoid index out of range or panic on revert --- miner/algo_common.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/miner/algo_common.go b/miner/algo_common.go index 7d987f23e2..6962030eb5 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -427,14 +427,13 @@ func (envDiff *environmentDiff) _bundle(bundle *types.SimulatedBundle, chData ch // Try committing the transaction but without finalisation receipt, cumulative, gasPool, err := TryApply(envDiff, chData, header, header.BaseFee, tx, *chData.chain.GetVMConfig(), envDiff.gasPool) + accessLists = accessLists.Append(envDiff.state.AccessList().Copy()) + revisions = append(revisions, envDiff.state.Snapshot()) if err != nil { bundleErr = err break } - accessLists = accessLists.Append(envDiff.state.AccessList().Copy()) - revisions = append(revisions, envDiff.state.Snapshot()) - if receipt.Status != types.ReceiptStatusSuccessful && !bundle.OriginalBundle.RevertingHash(tx.Hash()) { bundleErr = errors.New("bundle tx revert") break From de80c006efc671e79cabd6c35c741701c1d0aa12 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 19 Jul 2023 03:27:39 -0500 Subject: [PATCH 06/46] Try copy before revert and avoid initial environment copy --- miner/algo_common.go | 4 +++- miner/algo_greedy_buckets.go | 6 +++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/miner/algo_common.go b/miner/algo_common.go index 6962030eb5..c481dba611 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -451,8 +451,10 @@ func (envDiff *environmentDiff) _bundle(bundle *types.SimulatedBundle, chData ch if bundleErr != nil { for i := len(accessLists) - 1; i > 0; i-- { + prev := envDiff.state.AccessList().Copy() envDiff.state.RevertToSnapshotWithAccessList(revisions[i], accessLists[i]) - envDiff.state.SetAccessList(accessLists[i]) + envDiff.state.SetAccessList(prev) + //envDiff.state.SetAccessList(accessLists[i]) } if len(accessLists) > 1 { diff --git a/miner/algo_greedy_buckets.go b/miner/algo_greedy_buckets.go index cd3b4b54ff..34e7a57f7f 100644 --- a/miner/algo_greedy_buckets.go +++ b/miner/algo_greedy_buckets.go @@ -165,6 +165,9 @@ func (b *greedyBucketsBuilder) commit(envDiff *environmentDiff, usedEntry.Success = false usedSbundles = append(usedSbundles, usedEntry) } + } else { + usedEntry.Success = false + usedSbundles = append(usedSbundles, usedEntry) } continue } @@ -242,7 +245,8 @@ func (b *greedyBucketsBuilder) mergeOrdersIntoEnvDiff( func (b *greedyBucketsBuilder) buildBlock(simBundles 
[]types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { orders := types.NewTransactionsByPriceAndNonce(b.inputEnvironment.signer, transactions, simBundles, simSBundles, b.inputEnvironment.header.BaseFee) - envDiff := newEnvironmentDiff(b.inputEnvironment.copy()) + //envDiff := newEnvironmentDiff(b.inputEnvironment.copy()) + envDiff := newEnvironmentDiff(b.inputEnvironment) usedBundles, usedSbundles := b.mergeOrdersIntoEnvDiff(envDiff, orders) envDiff.applyToBaseEnv() return envDiff.baseEnvironment, usedBundles, usedSbundles From 95b97ea0e3d16f1f5fd4b624f586025221bfc729 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 19 Jul 2023 03:35:33 -0500 Subject: [PATCH 07/46] Update revert logic --- miner/algo_common.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/miner/algo_common.go b/miner/algo_common.go index c481dba611..8748ffb1f3 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -449,12 +449,10 @@ func (envDiff *environmentDiff) _bundle(bundle *types.SimulatedBundle, chData ch gasUsed += receipt.GasUsed } + accessLists = accessLists.Append(envDiff.state.AccessList().Copy()) if bundleErr != nil { - for i := len(accessLists) - 1; i > 0; i-- { - prev := envDiff.state.AccessList().Copy() - envDiff.state.RevertToSnapshotWithAccessList(revisions[i], accessLists[i]) - envDiff.state.SetAccessList(prev) - //envDiff.state.SetAccessList(accessLists[i]) + for i := len(accessLists) - 2; i > 0; i-- { + envDiff.state.RevertToSnapshotWithAccessList(revisions[i], accessLists[i+1]) } if len(accessLists) > 1 { From 4b77fbe38497f41948370690a949145124859127 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 19 Jul 2023 03:43:02 -0500 Subject: [PATCH 08/46] Add access list parity --- miner/algo_common.go | 1 + 1 file changed, 1 insertion(+) diff --git a/miner/algo_common.go b/miner/algo_common.go index 8748ffb1f3..a4adc490d8 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -453,6 +453,7 @@ func (envDiff *environmentDiff) _bundle(bundle *types.SimulatedBundle, chData ch if bundleErr != nil { for i := len(accessLists) - 2; i > 0; i-- { envDiff.state.RevertToSnapshotWithAccessList(revisions[i], accessLists[i+1]) + envDiff.state.SetAccessList(accessLists[i]) } if len(accessLists) > 1 { From e369cf6eb590d31f4a15f007ccf8d809a65cfb60 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 19 Jul 2023 04:03:21 -0500 Subject: [PATCH 09/46] Initial commit for efficient revert experimentation --- core/state/access_list.go | 10 +- core/state/multi_tx_snapshot.go | 231 ++++++++++++++++ core/state/multi_tx_snapshot_test.go | 394 +++++++++++++++++++++++++++ core/state/state_object.go | 4 + core/state/statedb.go | 59 +++- miner/algo_common.go | 238 +--------------- miner/algo_greedy_buckets.go | 165 ++++++++--- miner/algo_greedy_test.go | 5 +- miner/algo_test.go | 5 +- miner/order_apply_changes.go | 198 ++++++++++++++ miner/worker.go | 8 +- 11 files changed, 1024 insertions(+), 293 deletions(-) create mode 100644 core/state/multi_tx_snapshot.go create mode 100644 core/state/multi_tx_snapshot_test.go create mode 100644 miner/order_apply_changes.go diff --git a/core/state/access_list.go b/core/state/access_list.go index cb4410ecdf..0ff5c3db6e 100644 --- a/core/state/access_list.go +++ b/core/state/access_list.go @@ -17,7 +17,6 @@ package state import ( - "fmt" "github.com/ethereum/go-ethereum/common" ) @@ -26,12 +25,6 @@ type accessList struct { slots 
[]map[common.Hash]struct{} } -type AccessLists []*accessList - -func (als AccessLists) Append(al *accessList) AccessLists { - return append(als, al) -} - // ContainsAddress returns true if the address is in the access list. func (al *accessList) ContainsAddress(address common.Address) bool { _, ok := al.addresses[address] @@ -121,8 +114,7 @@ func (al *accessList) DeleteSlot(address common.Address, slot common.Hash) { idx, addrOk := al.addresses[address] // There are two ways this can fail if !addrOk { - panic(fmt.Sprintf("reverting slot change, address %x not present in list", address)) - //panic("reverting slot change, address not present in list") + panic("reverting slot change, address not present in list") } slotmap := al.slots[idx] delete(slotmap, slot) diff --git a/core/state/multi_tx_snapshot.go b/core/state/multi_tx_snapshot.go new file mode 100644 index 0000000000..b5c9e76f3f --- /dev/null +++ b/core/state/multi_tx_snapshot.go @@ -0,0 +1,231 @@ +package state + +import ( + "github.com/ethereum/go-ethereum/common" + "math/big" +) + +// MultiTxSnapshot retains StateDB changes for multiple transactions. +type MultiTxSnapshot struct { + invalid bool + + numLogsAdded map[common.Hash]int + + prevObjects map[common.Address]*stateObject + + accountStorage map[common.Address]map[common.Hash]*common.Hash + accountBalance map[common.Address]*big.Int + accountNonce map[common.Address]uint64 + accountCode map[common.Address][]byte + accountCodeHash map[common.Address][]byte + + accountSuicided map[common.Address]bool + accountDeleted map[common.Address]bool + + accountNotPending map[common.Address]struct{} + accountNotDirty map[common.Address]struct{} +} + +func NewMultiTxSnapshot() *MultiTxSnapshot { + return &MultiTxSnapshot{ + numLogsAdded: make(map[common.Hash]int), + prevObjects: make(map[common.Address]*stateObject), + accountStorage: make(map[common.Address]map[common.Hash]*common.Hash), + accountBalance: make(map[common.Address]*big.Int), + accountNonce: make(map[common.Address]uint64), + accountCode: make(map[common.Address][]byte), + accountCodeHash: make(map[common.Address][]byte), + accountSuicided: make(map[common.Address]bool), + accountDeleted: make(map[common.Address]bool), + accountNotPending: make(map[common.Address]struct{}), + accountNotDirty: make(map[common.Address]struct{}), + } +} + +// updateFromJournal updates the snapshot with the changes from the journal. +func (s *MultiTxSnapshot) updateFromJournal(journal *journal) { + for _, journalEntry := range journal.entries { + switch entry := journalEntry.(type) { + case balanceChange: + s.updateBalanceChange(entry) + case nonceChange: + s.updateNonceChange(entry) + case codeChange: + s.updateCodeChange(entry) + case addLogChange: + s.numLogsAdded[entry.txhash]++ + case createObjectChange: + s.updateCreateObjectChange(entry) + case resetObjectChange: + s.updateResetObjectChange(entry) + case suicideChange: + s.updateSuicideChange(entry) + } + } +} + +// objectChanged returns whether the object was changed (in the set of prevObjects). +func (s *MultiTxSnapshot) objectChanged(address common.Address) bool { + _, ok := s.prevObjects[address] + return ok +} + +// updateBalanceChange updates the snapshot with the balance change. +func (s *MultiTxSnapshot) updateBalanceChange(change balanceChange) { + if s.objectChanged(*change.account) { + return + } + if _, ok := s.accountBalance[*change.account]; !ok { + s.accountBalance[*change.account] = change.prev + } +} + +// updateNonceChange updates the snapshot with the nonce change. 
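+// Only the earliest previous value per account is recorded; later changes, and accounts
+// whose whole object was already captured in prevObjects, are skipped.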
+func (s *MultiTxSnapshot) updateNonceChange(change nonceChange) { + if s.objectChanged(*change.account) { + return + } + if _, ok := s.accountNonce[*change.account]; !ok { + s.accountNonce[*change.account] = change.prev + } +} + +// updateCodeChange updates the snapshot with the code change. +func (s *MultiTxSnapshot) updateCodeChange(change codeChange) { + if s.objectChanged(*change.account) { + return + } + if _, ok := s.accountCode[*change.account]; !ok { + s.accountCode[*change.account] = change.prevcode + s.accountCodeHash[*change.account] = change.prevhash + } +} + +// updateResetObjectChange updates the snapshot with the reset object change. +func (s *MultiTxSnapshot) updateResetObjectChange(change resetObjectChange) { + address := change.prev.address + if _, ok := s.prevObjects[address]; !ok { + s.prevObjects[address] = change.prev + } +} + +// updateCreateObjectChange updates the snapshot with the createObjectChange. +func (s *MultiTxSnapshot) updateCreateObjectChange(change createObjectChange) { + if _, ok := s.prevObjects[*change.account]; !ok { + s.prevObjects[*change.account] = nil + } +} + +// updateSuicideChange updates the snapshot with the suicide change. +func (s *MultiTxSnapshot) updateSuicideChange(change suicideChange) { + if s.objectChanged(*change.account) { + return + } + if _, ok := s.accountSuicided[*change.account]; !ok { + s.accountSuicided[*change.account] = change.prev + } + if _, ok := s.accountBalance[*change.account]; !ok { + s.accountBalance[*change.account] = change.prevbalance + } +} + +// updatePendingStorage updates the snapshot with the pending storage change. +func (s *MultiTxSnapshot) updatePendingStorage(address common.Address, key, value common.Hash, ok bool) { + if s.objectChanged(address) { + return + } + if _, ok := s.accountStorage[address]; !ok { + s.accountStorage[address] = make(map[common.Hash]*common.Hash) + } + if _, ok := s.accountStorage[address][key]; ok { + return + } + if ok { + s.accountStorage[address][key] = &value + } else { + s.accountStorage[address][key] = nil + } +} + +// updatePendingStatus updates the snapshot with previous pending status. +func (s *MultiTxSnapshot) updatePendingStatus(address common.Address, pending, dirty bool) { + if !pending { + s.accountNotPending[address] = struct{}{} + } + if !dirty { + s.accountNotDirty[address] = struct{}{} + } +} + +// updateObjectDeleted updates the snapshot with the object deletion. +func (s *MultiTxSnapshot) updateObjectDeleted(address common.Address, deleted bool) { + if s.objectChanged(address) { + return + } + if _, ok := s.accountDeleted[address]; !ok { + s.accountDeleted[address] = deleted + } +} + +// revertState reverts the state to the snapshot. 
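+// It removes the logs added since the snapshot was taken and restores replaced state
+// objects, pending storage slots, balances, nonces, code, suicided/deleted flags and
+// the pending/dirty account sets captured above.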
+func (s *MultiTxSnapshot) revertState(st *StateDB) { + // remove all the logs added + for txhash, numLogs := range s.numLogsAdded { + lens := len(st.logs[txhash]) + if lens == numLogs { + delete(st.logs, txhash) + } else { + st.logs[txhash] = st.logs[txhash][:lens-numLogs] + } + st.logSize -= uint(numLogs) + } + + // restore the objects + for address, object := range s.prevObjects { + if object == nil { + delete(st.stateObjects, address) + } else { + st.stateObjects[address] = object + } + } + + // restore storage + for address, storage := range s.accountStorage { + for key, value := range storage { + if value == nil { + delete(st.stateObjects[address].pendingStorage, key) + } else { + st.stateObjects[address].pendingStorage[key] = *value + } + } + } + + // restore balance + for address, balance := range s.accountBalance { + st.stateObjects[address].setBalance(balance) + } + // restore nonce + for address, nonce := range s.accountNonce { + st.stateObjects[address].setNonce(nonce) + } + // restore code + for address, code := range s.accountCode { + st.stateObjects[address].setCode(common.BytesToHash(s.accountCodeHash[address]), code) + } + // restore suicided + for address, suicided := range s.accountSuicided { + st.stateObjects[address].suicided = suicided + } + // restore deleted + for address, deleted := range s.accountDeleted { + st.stateObjects[address].deleted = deleted + } + + // restore pending status + for address := range s.accountNotPending { + delete(st.stateObjectsPending, address) + } + for address := range s.accountNotDirty { + delete(st.stateObjectsDirty, address) + } +} diff --git a/core/state/multi_tx_snapshot_test.go b/core/state/multi_tx_snapshot_test.go new file mode 100644 index 0000000000..2f32294e25 --- /dev/null +++ b/core/state/multi_tx_snapshot_test.go @@ -0,0 +1,394 @@ +package state + +import ( + "bytes" + "fmt" + "github.com/ethereum/go-ethereum/common" + "math/big" + "math/rand" + "testing" +) + +var ( + addrs []common.Address + keys []common.Hash +) + +func init() { + for i := 0; i < 20; i++ { + addrs = append(addrs, common.HexToAddress(fmt.Sprintf("0x%02x", i))) + } + for i := 0; i < 10; i++ { + keys = append(keys, common.HexToHash(fmt.Sprintf("0x%02x", i))) + } +} + +type observableAccountState struct { + address common.Address + balance *big.Int + nonce uint64 + code []byte + codeHash common.Hash + codeSize int + + state map[common.Hash]common.Hash + committedState map[common.Hash]common.Hash + + selfDestruct bool + exist bool + empty bool +} + +func getObservableAccountState(s *StateDB, address common.Address, storageKeys []common.Hash) *observableAccountState { + state := &observableAccountState{ + address: address, + balance: s.GetBalance(address), + nonce: s.GetNonce(address), + code: s.GetCode(address), + codeHash: s.GetCodeHash(address), + codeSize: s.GetCodeSize(address), + state: make(map[common.Hash]common.Hash), + committedState: make(map[common.Hash]common.Hash), + selfDestruct: s.HasSuicided(address), + exist: s.Exist(address), + empty: s.Empty(address), + } + + for _, key := range storageKeys { + state.state[key] = s.GetState(address, key) + state.committedState[key] = s.GetCommittedState(address, key) + } + + return state +} + +func verifyObservableAccountState(s *StateDB, state *observableAccountState) error { + if s.GetBalance(state.address).Cmp(state.balance) != 0 { + return fmt.Errorf("balance mismatch %v != %v", s.GetBalance(state.address), state.balance) + } + if s.GetNonce(state.address) != state.nonce { + return fmt.Errorf("nonce 
mismatch %v != %v", s.GetNonce(state.address), state.nonce) + } + if bytes.Compare(s.GetCode(state.address), state.code) != 0 { + return fmt.Errorf("code mismatch %v != %v", s.GetCode(state.address), state.code) + } + if s.GetCodeHash(state.address) != state.codeHash { + return fmt.Errorf("code hash mismatch %v != %v", s.GetCodeHash(state.address), state.codeHash) + } + if s.GetCodeSize(state.address) != state.codeSize { + return fmt.Errorf("code size mismatch %v != %v", s.GetCodeSize(state.address), state.codeSize) + } + for key, value := range state.state { + if s.GetState(state.address, key) != value { + return fmt.Errorf("state mismatch %v != %v", s.GetState(state.address, key), value) + } + } + for key, value := range state.committedState { + if s.GetCommittedState(state.address, key) != value { + return fmt.Errorf("committed state mismatch %v != %v", s.GetCommittedState(state.address, key), value) + } + } + if s.HasSuicided(state.address) != state.selfDestruct { + return fmt.Errorf("self destruct mismatch %v != %v", s.HasSuicided(state.address), state.selfDestruct) + } + if s.Exist(state.address) != state.exist { + return fmt.Errorf("exist mismatch %v != %v", s.Exist(state.address), state.exist) + } + if s.Empty(state.address) != state.empty { + return fmt.Errorf("empty mismatch %v != %v", s.Empty(state.address), state.empty) + } + return nil +} + +func randomBytes(n int) []byte { + b := make([]byte, n) + rand.Read(b) + return b +} + +func randomHash() common.Hash { + return common.BytesToHash(randomBytes(32)) +} + +func randFillAccountState(addr common.Address, s *StateDB) { + for i, key := range keys { + // Fill some keys with zero value, others with random value + if i%5 == 0 { + s.SetState(addr, key, common.BigToHash(common.Big0)) + } else { + s.SetState(addr, key, randomHash()) + } + } +} + +func randFillAccount(addr common.Address, s *StateDB) { + s.SetNonce(addr, rand.Uint64()) + s.SetBalance(addr, big.NewInt(rand.Int63())) + s.SetCode(addr, randomBytes(rand.Intn(100))) + randFillAccountState(addr, s) +} + +func prepareInitialState(s *StateDB) { + // We neet to create realistic state for statedb + // for this we apply some changes + // 1. Before calling intermediateRoot + // 2. 
After calling intermediateRoot but before calling Finalise + var beforeCommitHooks, afterCommitHooks []func(addr common.Address, s *StateDB) + addAccount := func(beforeCommit, afterCommit func(addr common.Address, s *StateDB)) { + beforeCommitHooks = append(beforeCommitHooks, beforeCommit) + afterCommitHooks = append(afterCommitHooks, afterCommit) + } + + rand.Seed(0) + + addAccount(func(addr common.Address, s *StateDB) { + s.SetNonce(addr, rand.Uint64()) + }, nil) + addAccount(nil, func(addr common.Address, s *StateDB) { + s.SetNonce(addr, rand.Uint64()) + }) + addAccount(func(addr common.Address, s *StateDB) { + s.SetNonce(addr, rand.Uint64()) + }, func(addr common.Address, s *StateDB) { + s.SetNonce(addr, rand.Uint64()) + }) + + addAccount(func(addr common.Address, s *StateDB) { + s.SetBalance(addr, big.NewInt(rand.Int63())) + }, nil) + addAccount(nil, func(addr common.Address, s *StateDB) { + s.SetBalance(addr, big.NewInt(rand.Int63())) + }) + addAccount(func(addr common.Address, s *StateDB) { + s.SetBalance(addr, big.NewInt(rand.Int63())) + }, func(addr common.Address, s *StateDB) { + s.SetBalance(addr, big.NewInt(rand.Int63())) + }) + + addAccount(func(addr common.Address, s *StateDB) { + s.SetCode(addr, randomBytes(rand.Intn(100))) + }, nil) + addAccount(nil, func(addr common.Address, s *StateDB) { + s.SetCode(addr, randomBytes(rand.Intn(100))) + }) + addAccount(func(addr common.Address, s *StateDB) { + s.SetCode(addr, randomBytes(rand.Intn(100))) + s.SetCode(addr, nil) + }, func(addr common.Address, s *StateDB) { + s.SetCode(addr, randomBytes(rand.Intn(100))) + }) + addAccount(func(addr common.Address, s *StateDB) { + s.SetCode(addr, randomBytes(rand.Intn(100))) + s.Suicide(addr) + }, func(addr common.Address, s *StateDB) { + s.SetCode(addr, randomBytes(rand.Intn(100))) + }) + + addAccount(func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + s.Suicide(addr) + }, nil) + addAccount(nil, func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + s.Suicide(addr) + }) + addAccount(func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + }, func(addr common.Address, s *StateDB) { + s.Suicide(addr) + }) + addAccount(func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + s.Suicide(addr) + }, func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + }) + addAccount(func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + s.Suicide(addr) + }, func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + // calling it twice is possible + s.Suicide(addr) + s.Suicide(addr) + }) + + addAccount(func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + }, nil) + addAccount(nil, func(addr common.Address, s *StateDB) { + randFillAccount(addr, s) + }) + + for i, beforeHook := range beforeCommitHooks { + if beforeHook != nil { + beforeHook(addrs[i], s) + } + } + s.IntermediateRoot(true) + + for i, afterHook := range afterCommitHooks { + if afterHook != nil { + afterHook(addrs[i], s) + } + } + s.Finalise(true) +} + +func testMutliTxSnapshot(t *testing.T, actions func(s *StateDB)) { + s := newStateTest() + prepareInitialState(s.state) + + var obsStates []*observableAccountState + for _, account := range addrs { + obsStates = append(obsStates, getObservableAccountState(s.state, account, keys)) + } + + pendingAddressesBefore := make(map[common.Address]struct{}) + for k, v := range s.state.stateObjectsPending { + pendingAddressesBefore[k] = v + } + dirtyAddressesBefore := make(map[common.Address]struct{}) + for k, 
v := range s.state.stateObjectsDirty { + dirtyAddressesBefore[k] = v + } + + err := s.state.MultiTxSnapshot() + if err != nil { + t.Fatal("MultiTxSnapshot failed", err) + } + + if actions != nil { + actions(s.state) + } + + err = s.state.MultiTxSnapshotRevert() + if err != nil { + t.Fatal("MultiTxSnapshotRevert failed", err) + } + + for _, obsState := range obsStates { + err := verifyObservableAccountState(s.state, obsState) + if err != nil { + t.Error("state mismatch", "account", obsState.address, err) + } + } + + if len(s.state.stateObjectsPending) != len(pendingAddressesBefore) { + t.Error("pending state objects count mismatch", "got", len(s.state.stateObjectsPending), "expected", len(pendingAddressesBefore)) + } + for k, _ := range s.state.stateObjectsPending { + if _, ok := pendingAddressesBefore[k]; !ok { + t.Error("stateObjectsPending mismatch, before was nil", "address", k) + } + } + if len(s.state.stateObjectsDirty) != len(dirtyAddressesBefore) { + t.Error("dirty state objects count mismatch", "got", len(s.state.stateObjectsDirty), "expected", len(dirtyAddressesBefore)) + } + for k, _ := range s.state.stateObjectsDirty { + if _, ok := dirtyAddressesBefore[k]; !ok { + t.Error("stateObjectsDirty mismatch, before was nil", "address", k) + } + } + + root := s.state.IntermediateRoot(true) + + cleanState := newStateTest() + prepareInitialState(cleanState.state) + expectedRoot := cleanState.state.IntermediateRoot(true) + + if root != expectedRoot { + t.Error("root mismatch", "got", root, "expected", expectedRoot) + } +} + +func TestMultiTxSnapshotAccountChangesSimple(t *testing.T) { + testMutliTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + s.SetNonce(addr, 78) + s.SetBalance(addr, big.NewInt(79)) + s.SetCode(addr, []byte{0x80}) + } + s.Finalise(true) + }) +} + +func TestMultiTxSnapshotAccountChangesMultiTx(t *testing.T) { + testMutliTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + s.SetNonce(addr, 78) + s.SetBalance(addr, big.NewInt(79)) + s.SetCode(addr, []byte{0x80}) + } + s.Finalise(true) + + for _, addr := range addrs { + s.SetNonce(addr, 79) + s.SetBalance(addr, big.NewInt(80)) + s.SetCode(addr, []byte{0x81}) + } + s.Finalise(true) + }) +} + +func TestMultiTxSnapshotAccountChangesSelfDestruct(t *testing.T) { + testMutliTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + s.SetNonce(addr, 78) + s.SetBalance(addr, big.NewInt(79)) + s.SetCode(addr, []byte{0x80}) + } + s.Finalise(true) + + for _, addr := range addrs { + s.Suicide(addr) + } + s.Finalise(true) + + for _, addr := range addrs { + s.SetNonce(addr, 79) + s.SetBalance(addr, big.NewInt(80)) + s.SetCode(addr, []byte{0x81}) + } + s.Finalise(true) + }) +} + +func TestMultiTxSnapshotAccountChangesEmptyAccount(t *testing.T) { + testMutliTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + s.SetNonce(addr, 78) + s.SetBalance(addr, big.NewInt(79)) + s.SetCode(addr, []byte{0x80}) + } + s.Finalise(true) + + for _, addr := range addrs { + s.SetNonce(addr, 0) + s.SetBalance(addr, common.Big0) + s.SetCode(addr, nil) + } + s.Finalise(true) + + for _, addr := range addrs { + s.SetNonce(addr, 79) + s.SetBalance(addr, big.NewInt(80)) + s.SetCode(addr, []byte{0x81}) + } + s.Finalise(true) + }) +} + +func TestMultiTxSnapshotStateChanges(t *testing.T) { + testMutliTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + randFillAccountState(addr, s) + } + s.Finalise(true) + + for _, addr := range addrs { + randFillAccountState(addr, s) + } + s.Finalise(true) + }) +} diff --git 
a/core/state/state_object.go b/core/state/state_object.go index 7e34cba44a..1f74ee606d 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -247,6 +247,10 @@ func (s *stateObject) setState(key, value common.Hash) { func (s *stateObject) finalise(prefetch bool) { slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) for key, value := range s.dirtyStorage { + if multiSnap := s.db.multiTxSnapshot; multiSnap != nil { + prev, ok := s.pendingStorage[key] + multiSnap.updatePendingStorage(s.address, key, prev, ok) + } s.pendingStorage[key] = value if value != s.originStorage[key] { slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure diff --git a/core/state/statedb.go b/core/state/statedb.go index 06c2e301df..da2f3a3cbf 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -109,6 +109,9 @@ type StateDB struct { validRevisions []revision nextRevisionId int + // Multi-Transaction Snapshot + multiTxSnapshot *MultiTxSnapshot + // Measurements gathered during execution for debugging purposes AccountReads time.Duration AccountHashes time.Duration @@ -833,17 +836,6 @@ func (s *StateDB) RevertToSnapshot(revid int) { s.validRevisions = s.validRevisions[:idx] } -func (s *StateDB) RevertToSnapshotWithAccessList(revid int, accessList *accessList) { - prev := s.accessList - s.accessList = accessList - s.RevertToSnapshot(revid) - s.accessList = prev -} - -func (s *StateDB) SetAccessList(accessList *accessList) { - s.accessList = accessList -} - // GetRefund returns the current value of the refund counter. func (s *StateDB) GetRefund() uint64 { return s.refund @@ -853,6 +845,9 @@ func (s *StateDB) GetRefund() uint64 { // the journal as well as the refunds. Finalise, however, will not push any updates // into the tries just yet. Only IntermediateRoot or Commit will do that. func (s *StateDB) Finalise(deleteEmptyObjects bool) { + if multiSnap := s.multiTxSnapshot; multiSnap != nil { + multiSnap.updateFromJournal(s.journal) + } addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) for addr := range s.journal.dirties { obj, exist := s.stateObjects[addr] @@ -866,6 +861,10 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { continue } if obj.suicided || (deleteEmptyObjects && obj.empty()) { + if multiSnap := s.multiTxSnapshot; multiSnap != nil { + multiSnap.updateObjectDeleted(obj.address, obj.deleted) + } + obj.deleted = true // We need to maintain account deletions explicitly (will remain @@ -883,6 +882,11 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { } else { obj.finalise(true) // Prefetch slots in the background } + if multiSnap := s.multiTxSnapshot; multiSnap != nil { + _, wasPending := s.stateObjectsPending[addr] + _, wasDirty := s.stateObjectsDirty[addr] + multiSnap.updatePendingStatus(addr, wasPending, wasDirty) + } s.stateObjectsPending[addr] = struct{}{} s.stateObjectsDirty[addr] = struct{}{} @@ -905,6 +909,10 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Finalise all the dirty storage states and write them into the tries s.Finalise(deleteEmptyObjects) + if s.multiTxSnapshot != nil { + s.multiTxSnapshot.invalid = true + } + // If there was a trie prefetcher operating, it gets aborted and irrevocably // modified after we start retrieving tries. Remove it from the statedb after // this round of use. @@ -1196,3 +1204,32 @@ func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common. 
func (s *StateDB) AccessList() *accessList { return s.accessList } + +// MultiTxSnapshot creates new checkpoint for multi txs reverts +func (s *StateDB) MultiTxSnapshot() error { + if s.multiTxSnapshot != nil { + return errors.New("multi tx snapshot already exists") + } + s.multiTxSnapshot = NewMultiTxSnapshot() + return nil +} + +func (s *StateDB) MultiTxSnapshotRevert() error { + if s.multiTxSnapshot == nil { + return errors.New("multi tx snapshot does not exist") + } + if s.multiTxSnapshot.invalid { + return errors.New("multi tx snapshot is invalid") + } + s.multiTxSnapshot.revertState(s) + s.multiTxSnapshot = nil + return nil +} + +func (s *StateDB) MultiTxSnapshotDiscard() error { + if s.multiTxSnapshot == nil { + return errors.New("multi tx snapshot does not exist") + } + s.multiTxSnapshot = nil + return nil +} diff --git a/miner/algo_common.go b/miner/algo_common.go index a4adc490d8..378b0e403e 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -4,7 +4,6 @@ import ( "crypto/ecdsa" "errors" "fmt" - "github.com/ethereum/go-ethereum/crypto" "math/big" "sync/atomic" @@ -38,6 +37,8 @@ var ( EnforceProfit: false, ExpectedProfit: common.Big0, ProfitThresholdPercent: defaultProfitThreshold, + PriceCutoffPercent: defaultPriceCutoffPercent, + EnableMultiTxSnap: false, } ) @@ -45,6 +46,10 @@ var emptyCodeHash = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca var errInterrupt = errors.New("miner worker interrupted") +// BuildBlockFunc is the function signature for building a block +type BuildBlockFunc func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, + transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) + // lowProfitError is returned when an order is not committed due to low profit or low effective gas price type lowProfitError struct { ExpectedProfit *big.Int @@ -74,6 +79,8 @@ type algorithmConfig struct { // is 10 (i.e. 10%), then the minimum effective gas price included in the same bucket as the top transaction // is (1000 * 10%) = 100 wei. 
PriceCutoffPercent int + + EnableMultiTxSnap bool } type chainData struct { @@ -249,235 +256,6 @@ func (envDiff *environmentDiff) commitTx(tx *types.Transaction, chData chainData return receipt, shiftTx, nil } -func (envDiff *environmentDiff) _bundle(bundle *types.SimulatedBundle, chData chainData, interrupt *int32, algoConf algorithmConfig) error { - // we don't want to finalize until all the transactions in the bundle have been successfully applied, otherwise snapshot fails - var ( - coinbase = envDiff.baseEnvironment.coinbase - - //coinbaseBalanceBefore = envDiff.state.GetBalance(coinbase) - - //profitBefore = new(big.Int).Set(envDiff.newProfit) - hasBaseFee = envDiff.header.BaseFee != nil - - //gasUsed uint64 - bundleErr error - - TryApply = func( - envDiff *environmentDiff, chData chainData, header *types.Header, - baseFee *big.Int, tx *types.Transaction, - vmConf vm.Config, gp *core.GasPool, - ) (*types.Receipt, uint64, uint64, error) { // receipt, cumulativeGas, gasPool, error - envDiff.state.SetTxContext(tx.Hash(), envDiff.baseEnvironment.tcount+len(envDiff.newTxs)) - - blacklist := chData.blacklist - if len(blacklist) == 0 { - // TODO: apply transaction without blacklist - } - - sender, err := types.Sender(envDiff.baseEnvironment.signer, tx) - if err != nil { - return nil, 0, 0, err - } - - if _, in := blacklist[sender]; in { - return nil, 0, 0, errors.New("blacklist violation, tx.sender") - } - - if to := tx.To(); to != nil { - if _, in := blacklist[*to]; in { - return nil, 0, 0, errors.New("blacklist violation, tx.to") - } - } - - // we set precompile to nil, but they are set in the validation code - // there will be no difference in the result if precompile is not it the blocklist - touchTracer := logger.NewAccessListTracer(nil, common.Address{}, common.Address{}, nil) - vmConf.Tracer = touchTracer - vmConf.Debug = true - - var ( - hook = func() error { - for _, accessTuple := range touchTracer.AccessList() { - if _, in := blacklist[accessTuple.Address]; in { - return errors.New("blacklist violation, tx trace") - } - } - return nil - } - - used = header.GasUsed - gasPool = new(core.GasPool).AddGas(gp.Gas()) - - bc = chData.chain - author = coinbase - statedb = envDiff.state - chainConfig = chData.chainConfig - - snap = envDiff.state.Snapshot() - ) - msg, err := core.TransactionToMessage(tx, types.MakeSigner(chData.chainConfig, header.Number), baseFee) - if err != nil { - return nil, 0, 0, err - } - // Create a new context to be used in the EVM environment - blockContext := core.NewEVMBlockContext(header, bc, &author) - vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, chainConfig, vmConf) - - txContext := core.NewEVMTxContext(msg) - vmenv.Reset(txContext, statedb) - result, err := core.ApplyMessage(vmenv, msg, gasPool) - if err != nil { - statedb.RevertToSnapshot(snap) - return nil, 0, 0, err - } - - err = hook() - if err != nil { - statedb.RevertToSnapshot(snap) - return nil, 0, 0, err - } - - used += result.UsedGas - - // Create a new receipt for the transaction, storing the intermediate root and gas used - // by the tx. - receipt := &types.Receipt{Type: tx.Type(), PostState: make([]byte, 0), CumulativeGasUsed: used} - if result.Failed() { - receipt.Status = types.ReceiptStatusFailed - } else { - receipt.Status = types.ReceiptStatusSuccessful - } - receipt.TxHash = tx.Hash() - receipt.GasUsed = result.UsedGas - - // If the transaction created a contract, store the creation address in the receipt. 
- if msg.To == nil { - receipt.ContractAddress = crypto.CreateAddress(vmenv.TxContext.Origin, tx.Nonce()) - } - - // Set the receipt logs and create the bloom filter. - receipt.Logs = statedb.GetLogs(tx.Hash(), header.Number.Uint64(), header.Hash()) - receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) - receipt.BlockHash = header.Hash() - receipt.BlockNumber = header.Number - receipt.TransactionIndex = uint(statedb.TxIndex()) - return receipt, used, gasPool.Gas(), nil - } - ) - - envDiff.state.Finalise(true) - var ( - gp = new(core.GasPool).AddGas(envDiff.gasPool.Gas()) - //cumulativeGas uint64 - txs = make([]*types.Transaction, 0, len(bundle.OriginalBundle.Txs)) - receipts = make([]*types.Receipt, 0, len(bundle.OriginalBundle.Txs)) - gasUsed uint64 - profitTally = new(big.Int) - - originalAccessList = envDiff.state.AccessList().Copy() - snap = envDiff.state.Snapshot() - accessLists = make(state.AccessLists, 0, len(bundle.OriginalBundle.Txs)) - revisions = make([]int, 0, len(bundle.OriginalBundle.Txs)) - - header = types.CopyHeader(envDiff.header) - ) - accessLists = accessLists.Append(originalAccessList) - revisions = append(revisions, snap) - for _, tx := range bundle.OriginalBundle.Txs { - if hasBaseFee && tx.Type() == types.DynamicFeeTxType { - // Sanity check for extremely large numbers - if tx.GasFeeCap().BitLen() > 256 { - bundleErr = core.ErrFeeCapVeryHigh - break - } - if tx.GasTipCap().BitLen() > 256 { - bundleErr = core.ErrTipVeryHigh - break - } - - // Ensure gasFeeCap is greater than or equal to gasTipCap. - if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { - bundleErr = core.ErrTipAboveFeeCap - break - } - } - - if tx.Value().Sign() == -1 { - bundleErr = core.ErrNegativeValue - break - } - - gasPrice, err := tx.EffectiveGasTip(envDiff.header.BaseFee) - if err != nil { - bundleErr = err - break - } - - _, err = types.Sender(envDiff.baseEnvironment.signer, tx) - if err != nil { - bundleErr = err - break - } - - if checkInterrupt(interrupt) { - bundleErr = errInterrupt - break - } - - // Try committing the transaction but without finalisation - receipt, cumulative, gasPool, err := TryApply(envDiff, chData, header, - header.BaseFee, tx, *chData.chain.GetVMConfig(), envDiff.gasPool) - accessLists = accessLists.Append(envDiff.state.AccessList().Copy()) - revisions = append(revisions, envDiff.state.Snapshot()) - if err != nil { - bundleErr = err - break - } - - if receipt.Status != types.ReceiptStatusSuccessful && !bundle.OriginalBundle.RevertingHash(tx.Hash()) { - bundleErr = errors.New("bundle tx revert") - break - } - - // Update the gas pool and the cumulative gas used - gp.SetGas(gasPool) - profitTally.Add(profitTally, new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(receipt.GasUsed))) - header.GasUsed = cumulative - //cumulativeGas = cumulative - txs = append(txs, tx) - receipts = append(receipts, receipt) - gasUsed += receipt.GasUsed - } - - accessLists = accessLists.Append(envDiff.state.AccessList().Copy()) - if bundleErr != nil { - for i := len(accessLists) - 2; i > 0; i-- { - envDiff.state.RevertToSnapshotWithAccessList(revisions[i], accessLists[i+1]) - envDiff.state.SetAccessList(accessLists[i]) - } - - if len(accessLists) > 1 { - envDiff.state.RevertToSnapshotWithAccessList(revisions[0], accessLists[1]) - } - //envDiff.state.RevertToSnapshot(revisions[0]) - envDiff.state.SetAccessList(originalAccessList) - //envDiff.state.RevertToSnapshotWithAccessList(snap, tmpAccessList.Append(envDiff.state.AccessList())) - //envDiff.state.SetAccessList(originalAccessList) - 
//envDiff.state.RevertToSnapshot(snap) - return bundleErr - } - // skip profit calculation for now cause that gets complicated - // TODO: actually if we tally up the profit diffs and egp diffs we can calculate the profit without finalizing state - envDiff.state.Finalise(true) - envDiff.gasPool.SetGas(gp.Gas()) - envDiff.newTxs = append(envDiff.newTxs, txs...) - envDiff.newReceipts = append(envDiff.newReceipts, receipts...) - envDiff.newProfit = new(big.Int).Add(envDiff.newProfit, profitTally) - //envDiff.header.GasUsed += cumulativeGas - envDiff.header.GasUsed = header.GasUsed - return nil -} - // Commit Bundle to env diff func (envDiff *environmentDiff) commitBundle(bundle *types.SimulatedBundle, chData chainData, interrupt *int32, algoConf algorithmConfig) error { coinbase := envDiff.baseEnvironment.coinbase diff --git a/miner/algo_greedy_buckets.go b/miner/algo_greedy_buckets.go index 34e7a57f7f..1a305c7f8b 100644 --- a/miner/algo_greedy_buckets.go +++ b/miner/algo_greedy_buckets.go @@ -13,6 +13,8 @@ import ( "github.com/ethereum/go-ethereum/params" ) +var errNoAlgorithmConfig = errors.New("no algorithm config specified") + // / To use it: // / 1. Copy relevant data from the worker // / 2. Call buildBlock @@ -25,20 +27,18 @@ type greedyBucketsBuilder struct { interrupt *int32 gasUsedMap map[*types.TxWithMinerFee]uint64 algoConf algorithmConfig + buildBlockFunc BuildBlockFunc } func newGreedyBucketsBuilder( chain *core.BlockChain, chainConfig *params.ChainConfig, algoConf *algorithmConfig, blacklist map[common.Address]struct{}, env *environment, key *ecdsa.PrivateKey, interrupt *int32, -) *greedyBucketsBuilder { +) (*greedyBucketsBuilder, error) { if algoConf == nil { - algoConf = &algorithmConfig{ - EnforceProfit: true, - ExpectedProfit: nil, - ProfitThresholdPercent: defaultProfitThreshold, - } + return nil, errNoAlgorithmConfig } - return &greedyBucketsBuilder{ + + builder := &greedyBucketsBuilder{ inputEnvironment: env, chainData: chainData{chainConfig: chainConfig, chain: chain, blacklist: blacklist}, builderKey: key, @@ -46,6 +46,56 @@ func newGreedyBucketsBuilder( gasUsedMap: make(map[*types.TxWithMinerFee]uint64), algoConf: *algoConf, } + var buildBlockFunc BuildBlockFunc + if algoConf.EnableMultiTxSnap { + buildBlockFunc = func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, + transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + + orders := types.NewTransactionsByPriceAndNonce(builder.inputEnvironment.signer, transactions, + simBundles, simSBundles, builder.inputEnvironment.header.BaseFee) + + usedBundles, usedSbundles := builder.buildMultiTxSnapBlock(orders) + return builder.inputEnvironment, usedBundles, usedSbundles + } + } else { + buildBlockFunc = func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, + transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + + orders := types.NewTransactionsByPriceAndNonce(builder.inputEnvironment.signer, transactions, + simBundles, simSBundles, builder.inputEnvironment.header.BaseFee) + + envDiff := newEnvironmentDiff(builder.inputEnvironment.copy()) + usedBundles, usedSbundles := builder.mergeOrdersIntoEnvDiff(envDiff, orders) + envDiff.applyToBaseEnv() + return envDiff.baseEnvironment, usedBundles, usedSbundles + } + } + + builder.buildBlockFunc = buildBlockFunc + return builder, nil +} + +// CheckRetryOrderAndReinsert checks if the order has been retried up to the retryLimit 
and if not, reinserts the order into the orders heap. +func CheckRetryOrderAndReinsert( + order *types.TxWithMinerFee, orders *types.TransactionsByPriceAndNonce, + retryMap map[*types.TxWithMinerFee]int, retryLimit int) bool { + + var isRetryable bool = false + if retryCount, exists := retryMap[order]; exists { + if retryCount != retryLimit { + isRetryable = true + retryMap[order] = retryCount + 1 + } + } else { + retryMap[order] = 0 + isRetryable = true + } + + if isRetryable { + orders.Push(order) + } + + return isRetryable } // CutoffPriceFromOrder returns the cutoff price for a given order based on the cutoff percent. @@ -68,28 +118,6 @@ func (b *greedyBucketsBuilder) commit(envDiff *environmentDiff, usedBundles []types.SimulatedBundle usedSbundles []types.UsedSBundle algoConf = b.algoConf - - CheckRetryOrderAndReinsert = func( - order *types.TxWithMinerFee, orders *types.TransactionsByPriceAndNonce, - retryMap map[*types.TxWithMinerFee]int, retryLimit int, - ) bool { - var isRetryable bool = false - if retryCount, exists := retryMap[order]; exists { - if retryCount != retryLimit { - isRetryable = true - retryMap[order] = retryCount + 1 - } - } else { - retryMap[order] = 0 - isRetryable = true - } - - if isRetryable { - orders.Push(order) - } - - return isRetryable - } ) for _, order := range transactions { @@ -119,8 +147,7 @@ func (b *greedyBucketsBuilder) commit(envDiff *environmentDiff, log.Trace("Included tx", "EGP", effGapPrice.String(), "gasUsed", receipt.GasUsed) } } else if bundle := order.Bundle(); bundle != nil { - //err := envDiff.commitBundle(bundle, b.chainData, b.interrupt, algoConf) - err := envDiff._bundle(bundle, b.chainData, b.interrupt, algoConf) + err := envDiff.commitBundle(bundle, b.chainData, b.interrupt, algoConf) if err != nil { log.Trace("Could not apply bundle", "bundle", bundle.OriginalBundle.Hash, "err", err) @@ -140,7 +167,7 @@ func (b *greedyBucketsBuilder) commit(envDiff *environmentDiff, } log.Trace("Included bundle", "bundleEGP", bundle.MevGasPrice.String(), - "gasUsed", bundle.TotalGasUsed, "ethToCoinbase", ethIntToFloat(bundle.TotalEth)) + "gasUsed", bundle.TotalGasUsed, "ethToCoinbase", ethIntToFloat(bundle.EthSentToCoinbase)) usedBundles = append(usedBundles, *bundle) } else if sbundle := order.SBundle(); sbundle != nil { usedEntry := types.UsedSBundle{ @@ -243,11 +270,71 @@ func (b *greedyBucketsBuilder) mergeOrdersIntoEnvDiff( return usedBundles, usedSbundles } -func (b *greedyBucketsBuilder) buildBlock(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - orders := types.NewTransactionsByPriceAndNonce(b.inputEnvironment.signer, transactions, simBundles, simSBundles, b.inputEnvironment.header.BaseFee) - //envDiff := newEnvironmentDiff(b.inputEnvironment.copy()) - envDiff := newEnvironmentDiff(b.inputEnvironment) - usedBundles, usedSbundles := b.mergeOrdersIntoEnvDiff(envDiff, orders) - envDiff.applyToBaseEnv() - return envDiff.baseEnvironment, usedBundles, usedSbundles +func (b *greedyBucketsBuilder) buildBlock(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, + transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + + return b.buildBlockFunc(simBundles, simSBundles, transactions) +} + +func (b *greedyBucketsBuilder) buildMultiTxSnapBlock(orders *types.TransactionsByPriceAndNonce) ([]types.SimulatedBundle, []types.UsedSBundle) { + var ( + 
usedBundles []types.SimulatedBundle + usedSbundles []types.UsedSBundle + orderFailed = false + ) + + for { + order := orders.Peek() + if order == nil { + break + } + + orderFailed = false + changes, err := newOrderApplyChanges(b.inputEnvironment) + if err != nil { + log.Error("Failed to create changes", "err", err) + return usedBundles, usedSbundles + } + + if tx := order.Tx(); tx != nil { + _, skip, err := changes.commitTx(tx, b.chainData) + switch skip { + case shiftTx: + orders.Shift() + case popTx: + orders.Pop() + } + + if err != nil { + log.Trace("Failed to commit tx with multi-transaction snapshot", "hash", tx.Hash(), "err", err) + orderFailed = true + } + } else if bundle := order.Bundle(); bundle != nil { + err = changes.commitBundle(bundle, b.chainData) + orders.Pop() + if err != nil { + log.Trace("Failed to commit bundle with multi-transaction snapshot", "bundle", bundle.OriginalBundle.Hash, "err", err) + orderFailed = true + } else { + usedBundles = append(usedBundles, *bundle) + } + } else if sbundle := order.SBundle(); sbundle != nil { + + } else { + // note: this should never happen because we should not be inserting invalid transaction types into + // the orders heap + panic("unsupported order type found") + } + + if orderFailed { + if err = changes.revert(); err != nil { + log.Error("Failed to revert changes with multi-transaction snapshot", "err", err) + } + } else { + if err = changes.apply(); err != nil { + log.Error("Failed to apply changes with multi-transaction snapshot", "err", err) + } + } + } + return usedBundles, usedSbundles } diff --git a/miner/algo_greedy_test.go b/miner/algo_greedy_test.go index c893fc83d0..49bb59fe14 100644 --- a/miner/algo_greedy_test.go +++ b/miner/algo_greedy_test.go @@ -30,7 +30,10 @@ func TestBuildBlockGasLimit(t *testing.T) { var result *environment switch algo { case ALGO_GREEDY_BUCKETS: - builder := newGreedyBucketsBuilder(chData.chain, chData.chainConfig, nil, nil, env, nil, nil) + builder, err := newGreedyBucketsBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) + if err != nil { + t.Fatalf("Error creating greedy buckets builder: %v", err) + } result, _, _ = builder.buildBlock([]types.SimulatedBundle{}, nil, txs) case ALGO_GREEDY: builder := newGreedyBuilder(chData.chain, chData.chainConfig, nil, env, nil, nil) diff --git a/miner/algo_test.go b/miner/algo_test.go index 09ce0bef5f..0620ea37e4 100644 --- a/miner/algo_test.go +++ b/miner/algo_test.go @@ -229,7 +229,10 @@ func runAlgoTest(algo AlgoType, config *params.ChainConfig, alloc core.GenesisAl // build block switch algo { case ALGO_GREEDY_BUCKETS: - builder := newGreedyBucketsBuilder(chData.chain, chData.chainConfig, nil, nil, env, nil, nil) + builder, err := newGreedyBucketsBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) + if err != nil { + return nil, err + } resultEnv, _, _ = builder.buildBlock(bundles, nil, txPool) case ALGO_GREEDY: builder := newGreedyBuilder(chData.chain, chData.chainConfig, nil, env, nil, nil) diff --git a/miner/order_apply_changes.go b/miner/order_apply_changes.go new file mode 100644 index 0000000000..b7ae71afdd --- /dev/null +++ b/miner/order_apply_changes.go @@ -0,0 +1,198 @@ +package miner + +import ( + "errors" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/tracers/logger" + "github.com/ethereum/go-ethereum/log" + "math/big" +) + +// orderApplyChanges is 
a helper struct to apply and revert changes to the environment +type orderApplyChanges struct { + env *environment + gasPool *core.GasPool + usedGas uint64 + profit *big.Int + txs []*types.Transaction + receipts []*types.Receipt +} + +func newOrderApplyChanges(env *environment) (*orderApplyChanges, error) { + if err := env.state.MultiTxSnapshot(); err != nil { + return nil, err + } + + return &orderApplyChanges{ + env: env, + gasPool: new(core.GasPool).AddGas(env.gasPool.Gas()), + usedGas: env.header.GasUsed, + profit: new(big.Int).Set(env.profit), + txs: make([]*types.Transaction, 0), + receipts: make([]*types.Receipt, 0), + }, nil +} + +func (c *orderApplyChanges) commitTx(tx *types.Transaction, chData chainData) (*types.Receipt, int, error) { + signer := c.env.signer + sender, err := types.Sender(signer, tx) + if err != nil { + return nil, popTx, err + } + + gasPrice, err := tx.EffectiveGasTip(c.env.header.BaseFee) + if err != nil { + return nil, shiftTx, err + } + + if _, in := chData.blacklist[sender]; in { + return nil, popTx, errors.New("blacklist violation, tx.sender") + } + + if to := tx.To(); to != nil { + if _, in := chData.blacklist[*to]; in { + return nil, popTx, errors.New("blacklist violation, tx.to") + } + } + + cfg := *chData.chain.GetVMConfig() + touchTracer := logger.NewAccountTouchTracer() + cfg.Tracer = touchTracer + cfg.Debug = true + + c.env.state.SetTxContext(tx.Hash(), c.env.tcount+len(c.txs)) + receipt, err := core.ApplyTransaction(chData.chainConfig, chData.chain, &c.env.coinbase, c.gasPool, c.env.state, c.env.header, tx, &c.usedGas, cfg, nil) + if err != nil { + switch { + case errors.Is(err, core.ErrGasLimitReached): + // Pop the current out-of-gas transaction without shifting in the next from the account + from, _ := types.Sender(signer, tx) + log.Trace("Gas limit exceeded for current block", "sender", from) + return receipt, popTx, err + + case errors.Is(err, core.ErrNonceTooLow): + // New head notification data race between the transaction pool and miner, shift + from, _ := types.Sender(signer, tx) + log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) + return receipt, shiftTx, err + + case errors.Is(err, core.ErrNonceTooHigh): + // Reorg notification data race between the transaction pool and miner, skip account = + from, _ := types.Sender(signer, tx) + log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) + return receipt, popTx, err + + case errors.Is(err, core.ErrTxTypeNotSupported): + // Pop the unsupported transaction without shifting in the next from the account + from, _ := types.Sender(signer, tx) + log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) + return receipt, popTx, err + + default: + // Strange error, discard the transaction and get the next in line (note, the + // nonce-too-high clause will prevent us from executing in vain). 
+ log.Trace("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) + return receipt, shiftTx, err + } + } + + for _, address := range touchTracer.TouchedAddresses() { + if _, in := chData.blacklist[address]; in { + return nil, popTx, errors.New("blacklist violation, tx trace") + } + } + + c.profit.Add(c.profit, new(big.Int).Mul(new(big.Int).SetUint64(receipt.GasUsed), gasPrice)) + c.txs = append(c.txs, tx) + c.receipts = append(c.receipts, receipt) + + return receipt, shiftTx, nil +} + +func (c *orderApplyChanges) commitBundle(bundle *types.SimulatedBundle, chData chainData) error { + var ( + profitBefore = new(big.Int).Set(c.profit) + coinbaseBefore = new(big.Int).Set(c.env.state.GetBalance(c.env.coinbase)) + gasUsedBefore = c.usedGas + hasBaseFee = c.env.header.BaseFee != nil + + bundleErr error + ) + + for _, tx := range bundle.OriginalBundle.Txs { + if hasBaseFee && tx.Type() == types.DynamicFeeTxType { + // Sanity check for extremely large numbers + if tx.GasFeeCap().BitLen() > 256 { + bundleErr = core.ErrFeeCapVeryHigh + break + } + if tx.GasTipCap().BitLen() > 256 { + bundleErr = core.ErrTipVeryHigh + break + } + + // Ensure gasFeeCap is greater than or equal to gasTipCap. + if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + bundleErr = core.ErrTipAboveFeeCap + break + } + } + receipt, _, err := c.commitTx(tx, chData) + + if err != nil { + log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) + bundleErr = err + break + } + + if receipt.Status != types.ReceiptStatusSuccessful && !bundle.OriginalBundle.RevertingHash(tx.Hash()) { + log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) + bundleErr = errors.New("bundle tx revert") + break + } + } + + if bundleErr != nil { + return bundleErr + } + + var ( + bundleProfit = new(big.Int).Sub(c.env.state.GetBalance(c.env.coinbase), coinbaseBefore) + gasUsed = c.usedGas - gasUsedBefore + + effGP = new(big.Int).Div(bundleProfit, new(big.Int).SetUint64(gasUsed)) + simEffGP = new(big.Int).Set(bundle.MevGasPrice) + ) + + // allow >-1% divergence + effGP.Mul(effGP, common.Big100) + simEffGP.Mul(simEffGP, big.NewInt(99)) + if simEffGP.Cmp(effGP) > 0 { + log.Trace("Bundle underpays after inclusion", "bundle", bundle.OriginalBundle.Hash) + return errors.New("bundle underpays") + } + + c.profit.Add(profitBefore, bundleProfit) + return nil +} + +// revert reverts all changes to the environment - every commit operation must be followed by a revert or apply operation +func (c *orderApplyChanges) revert() error { + return c.env.state.MultiTxSnapshotRevert() +} + +func (c *orderApplyChanges) apply() error { + if err := c.env.state.MultiTxSnapshotDiscard(); err != nil { + return err + } + + c.env.gasPool.SetGas(c.gasPool.Gas()) + c.env.header.GasUsed = c.usedGas + c.env.profit.Set(c.profit) + c.env.tcount += len(c.txs) + c.env.txs = append(c.env.txs, c.txs...) + c.env.receipts = append(c.env.receipts, c.receipts...) 
+ return nil +} diff --git a/miner/worker.go b/miner/worker.go index d175850409..26770eedc4 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1415,11 +1415,15 @@ func (w *worker) fillTransactionsAlgoWorker(interrupt *int32, env *environment) ExpectedProfit: nil, ProfitThresholdPercent: defaultProfitThreshold, PriceCutoffPercent: priceCutoffPercent, + EnableMultiTxSnap: true, } - builder := newGreedyBucketsBuilder( + builder, err := newGreedyBucketsBuilder( w.chain, w.chainConfig, algoConf, w.blockList, env, w.config.BuilderTxSigningKey, interrupt, ) + if err != nil { + return nil, nil, nil, err + } newEnv, blockBundles, usedSbundle = builder.buildBlock(bundlesToConsider, sbundlesToConsider, pending) case ALGO_GREEDY: @@ -1499,7 +1503,7 @@ func (w *worker) generateWork(params *generateParams) (*types.Block, *big.Int, e totalSbundles++ } - log.Info("Block finalized and assembled", "blockProfit", ethIntToFloat(profit), + log.Info("Block finalized and assembled", "height", block.Number().String(), "blockProfit", ethIntToFloat(profit), "txs", len(env.txs), "bundles", len(blockBundles), "okSbundles", okSbundles, "totalSbundles", totalSbundles, "gasUsed", block.GasUsed(), "time", time.Since(start)) if metrics.EnabledBuilder { From c2a1a95c5bd7314d28a5fcff8afda21564dee01e Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Tue, 25 Jul 2023 20:14:07 -0500 Subject: [PATCH 10/46] Add additional unit tests and fix unit test errors, reduce duplicate code, add support for sbundles, expose CLI flag and environment variable to enable multi-tx-snapshots --- cmd/utils/flags.go | 9 + core/state/multi_tx_snapshot.go | 2 + miner/algo_common.go | 572 +++++++++----------------------- miner/algo_greedy.go | 49 ++- miner/algo_greedy_buckets.go | 78 +---- miner/algo_greedy_test.go | 5 +- miner/algo_test.go | 5 +- miner/env_changes.go | 418 +++++++++++++++++++++++ miner/env_changes_test.go | 384 +++++++++++++++++++++ miner/environment_diff.go | 399 ++++++++++++++++++++++ miner/miner.go | 38 ++- miner/order_apply_changes.go | 198 ----------- miner/worker.go | 14 +- 13 files changed, 1463 insertions(+), 708 deletions(-) create mode 100644 miner/env_changes.go create mode 100644 miner/env_changes_test.go create mode 100644 miner/environment_diff.go delete mode 100644 miner/order_apply_changes.go diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index e784e22d1a..68185dae25 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -709,6 +709,14 @@ var ( Usage: "Enable the builder", Category: flags.BuilderCategory, } + BuilderEnableMultiTxSnapshot = &cli.BoolFlag{ + Name: "builder.multi_tx_snapshot", + Usage: "Enable multi-transaction snapshots for block building, " + + "which decrease amount of state copying on bundle reverts (note: experimental)", + EnvVars: []string{"BUILDER_MULTI_TX_SNAPSHOT"}, + Value: ethconfig.Defaults.Miner.EnableMultiTransactionSnapshot, + Category: flags.BuilderCategory, + } BuilderEnableValidatorChecks = &cli.BoolFlag{ Name: "builder.validator_checks", Usage: "Enable the validator checks", @@ -1906,6 +1914,7 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { } cfg.PriceCutoffPercent = ctx.Int(MinerPriceCutoffPercentFlag.Name) + cfg.EnableMultiTransactionSnapshot = ctx.Bool(BuilderEnableMultiTxSnapshot.Name) } func setRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) { diff --git a/core/state/multi_tx_snapshot.go b/core/state/multi_tx_snapshot.go index b5c9e76f3f..eabfc06993 100644 --- a/core/state/multi_tx_snapshot.go +++ b/core/state/multi_tx_snapshot.go @@ -24,8 +24,10 @@ 
type MultiTxSnapshot struct { accountNotPending map[common.Address]struct{} accountNotDirty map[common.Address]struct{} + // TODO: snapdestructs, snapaccount storage } +// NewMultiTxSnapshot creates a new MultiTxSnapshot func NewMultiTxSnapshot() *MultiTxSnapshot { return &MultiTxSnapshot{ numLogsAdded: make(map[common.Hash]int), diff --git a/miner/algo_common.go b/miner/algo_common.go index 378b0e403e..b4d78d7977 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -44,11 +44,11 @@ var ( var emptyCodeHash = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") -var errInterrupt = errors.New("miner worker interrupted") - -// BuildBlockFunc is the function signature for building a block -type BuildBlockFunc func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, - transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) +var ( + errInterrupt = errors.New("miner worker interrupted") + errNoAlgorithmConfig = errors.New("no algorithm configuration specified") + errNoPrivateKey = errors.New("no private key provided") +) // lowProfitError is returned when an order is not committed due to low profit or low effective gas price type lowProfitError struct { @@ -66,75 +66,55 @@ func (e *lowProfitError) Error() string { ) } -type algorithmConfig struct { - // EnforceProfit is true if we want to enforce a minimum profit threshold - // for committing a transaction based on ProfitThresholdPercent - EnforceProfit bool - // ExpectedProfit should be set on a per transaction basis when profit is enforced - ExpectedProfit *big.Int - // ProfitThresholdPercent is the minimum profit threshold for committing a transaction - ProfitThresholdPercent *big.Int - // PriceCutoffPercent is the minimum effective gas price threshold used for bucketing transactions by price. - // For example if the top transaction in a list has an effective gas price of 1000 wei and PriceCutoffPercent - // is 10 (i.e. 10%), then the minimum effective gas price included in the same bucket as the top transaction - // is (1000 * 10%) = 100 wei. - PriceCutoffPercent int - - EnableMultiTxSnap bool -} - -type chainData struct { - chainConfig *params.ChainConfig - chain *core.BlockChain - blacklist map[common.Address]struct{} -} - -type environmentDiff struct { - baseEnvironment *environment - header *types.Header - gasPool *core.GasPool // available gas used to pack transactions - state *state.StateDB // apply state changes here - newProfit *big.Int - newTxs []*types.Transaction - newReceipts []*types.Receipt -} - -func newEnvironmentDiff(env *environment) *environmentDiff { - gasPool := new(core.GasPool).AddGas(env.gasPool.Gas()) - return &environmentDiff{ - baseEnvironment: env, - header: types.CopyHeader(env.header), - gasPool: gasPool, - state: env.state.Copy(), - newProfit: new(big.Int), +type ( + algorithmConfig struct { + // EnforceProfit is true if we want to enforce a minimum profit threshold + // for committing a transaction based on ProfitThresholdPercent + EnforceProfit bool + // ExpectedProfit should be set on a per-transaction basis when profit is enforced + ExpectedProfit *big.Int + // ProfitThresholdPercent is the minimum profit threshold for committing a transaction + ProfitThresholdPercent *big.Int + // PriceCutoffPercent is the minimum effective gas price threshold used for bucketing transactions by price. 
+ // For example if the top transaction in a list has an effective gas price of 1000 wei and PriceCutoffPercent + // is 10 (i.e. 10%), then the minimum effective gas price included in the same bucket as the top transaction + // is (1000 * 10%) = 100 wei. + PriceCutoffPercent int + // EnableMultiTxSnap is true if we want to use multi-transaction snapshot + // for committing transactions (note: experimental) + EnableMultiTxSnap bool + } + + chainData struct { + chainConfig *params.ChainConfig + chain *core.BlockChain + blacklist map[common.Address]struct{} + } + + // BuildBlockFunc is the function signature for building a block + BuildBlockFunc func( + simBundles []types.SimulatedBundle, + simSBundles []*types.SimSBundle, + transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) + + // CommitTxFunc is the function signature for committing a transaction + CommitTxFunc func(*types.Transaction, chainData) (*types.Receipt, int, error) + + // PayoutTransactionParams holds parameters for committing a payout transaction, used in commitPayoutTx + PayoutTransactionParams struct { + Amount *big.Int + BaseFee *big.Int + ChainData chainData + Gas uint64 + CommitFn CommitTxFunc + Receiver common.Address + Sender common.Address + SenderBalance *big.Int + SenderNonce uint64 + Signer types.Signer + PrivateKey *ecdsa.PrivateKey } -} - -func (envDiff *environmentDiff) copy() *environmentDiff { - gasPool := new(core.GasPool).AddGas(envDiff.gasPool.Gas()) - - return &environmentDiff{ - baseEnvironment: envDiff.baseEnvironment.copy(), - header: types.CopyHeader(envDiff.header), - gasPool: gasPool, - state: envDiff.state.Copy(), - newProfit: new(big.Int).Set(envDiff.newProfit), - newTxs: envDiff.newTxs[:], - newReceipts: envDiff.newReceipts[:], - } -} - -func (envDiff *environmentDiff) applyToBaseEnv() { - env := envDiff.baseEnvironment - env.gasPool = new(core.GasPool).AddGas(envDiff.gasPool.Gas()) - env.header = envDiff.header - env.state.StopPrefetcher() - env.state = envDiff.state - env.profit.Add(env.profit, envDiff.newProfit) - env.tcount += len(envDiff.newTxs) - env.txs = append(env.txs, envDiff.newTxs...) - env.receipts = append(env.receipts, envDiff.newReceipts...) 
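Editor's note (sketch): for orientation, this is how the new parameter struct is meant to be filled in, mirroring the envChanges.commitPayoutTx wrapper added later in this patch; the variables amount, gas, sender, receiver and prv are assumed to be in scope:

	receipt, err := commitPayoutTx(PayoutTransactionParams{
		Amount:        amount,
		BaseFee:       c.env.header.BaseFee,
		ChainData:     chData,
		Gas:           gas,
		CommitFn:      c.commitTx, // any CommitTxFunc can be plugged in here
		Receiver:      receiver,
		Sender:        sender,
		SenderBalance: c.env.state.GetBalance(sender),
		SenderNonce:   c.env.state.GetNonce(sender),
		Signer:        c.env.signer,
		PrivateKey:    prv,
	})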
-} +) func checkInterrupt(i *int32) bool { return i != nil && atomic.LoadInt32(i) != commitInterruptNone @@ -198,165 +178,6 @@ func applyTransactionWithBlacklist(signer types.Signer, config *params.ChainConf return receipt, statedb, err } -// commit tx to envDiff -func (envDiff *environmentDiff) commitTx(tx *types.Transaction, chData chainData) (*types.Receipt, int, error) { - header := envDiff.header - coinbase := &envDiff.baseEnvironment.coinbase - signer := envDiff.baseEnvironment.signer - - gasPrice, err := tx.EffectiveGasTip(header.BaseFee) - if err != nil { - return nil, shiftTx, err - } - - envDiff.state.SetTxContext(tx.Hash(), envDiff.baseEnvironment.tcount+len(envDiff.newTxs)) - - receipt, newState, err := applyTransactionWithBlacklist(signer, chData.chainConfig, chData.chain, coinbase, - envDiff.gasPool, envDiff.state, header, tx, &header.GasUsed, *chData.chain.GetVMConfig(), chData.blacklist) - - envDiff.state = newState - if err != nil { - switch { - case errors.Is(err, core.ErrGasLimitReached): - // Pop the current out-of-gas transaction without shifting in the next from the account - from, _ := types.Sender(signer, tx) - log.Trace("Gas limit exceeded for current block", "sender", from) - return receipt, popTx, err - - case errors.Is(err, core.ErrNonceTooLow): - // New head notification data race between the transaction pool and miner, shift - from, _ := types.Sender(signer, tx) - log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) - return receipt, shiftTx, err - - case errors.Is(err, core.ErrNonceTooHigh): - // Reorg notification data race between the transaction pool and miner, skip account = - from, _ := types.Sender(signer, tx) - log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) - return receipt, popTx, err - - case errors.Is(err, core.ErrTxTypeNotSupported): - // Pop the unsupported transaction without shifting in the next from the account - from, _ := types.Sender(signer, tx) - log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) - return receipt, popTx, err - - default: - // Strange error, discard the transaction and get the next in line (note, the - // nonce-too-high clause will prevent us from executing in vain). - log.Trace("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) - return receipt, shiftTx, err - } - } - - envDiff.newProfit = envDiff.newProfit.Add(envDiff.newProfit, gasPrice.Mul(gasPrice, big.NewInt(int64(receipt.GasUsed)))) - envDiff.newTxs = append(envDiff.newTxs, tx) - envDiff.newReceipts = append(envDiff.newReceipts, receipt) - - return receipt, shiftTx, nil -} - -// Commit Bundle to env diff -func (envDiff *environmentDiff) commitBundle(bundle *types.SimulatedBundle, chData chainData, interrupt *int32, algoConf algorithmConfig) error { - coinbase := envDiff.baseEnvironment.coinbase - tmpEnvDiff := envDiff.copy() - - coinbaseBalanceBefore := tmpEnvDiff.state.GetBalance(coinbase) - - profitBefore := new(big.Int).Set(tmpEnvDiff.newProfit) - var gasUsed uint64 - - for _, tx := range bundle.OriginalBundle.Txs { - if tmpEnvDiff.header.BaseFee != nil && tx.Type() == types.DynamicFeeTxType { - // Sanity check for extremely large numbers - if tx.GasFeeCap().BitLen() > 256 { - return core.ErrFeeCapVeryHigh - } - if tx.GasTipCap().BitLen() > 256 { - return core.ErrTipVeryHigh - } - // Ensure gasFeeCap is greater than or equal to gasTipCap. 
- if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { - return core.ErrTipAboveFeeCap - } - } - - if tx.Value().Sign() == -1 { - return core.ErrNegativeValue - } - - _, err := tx.EffectiveGasTip(envDiff.header.BaseFee) - if err != nil { - return err - } - - _, err = types.Sender(envDiff.baseEnvironment.signer, tx) - if err != nil { - return err - } - - if checkInterrupt(interrupt) { - return errInterrupt - } - - receipt, _, err := tmpEnvDiff.commitTx(tx, chData) - - if err != nil { - log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) - return err - } - - if receipt.Status != types.ReceiptStatusSuccessful && !bundle.OriginalBundle.RevertingHash(tx.Hash()) { - log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) - return errors.New("bundle tx revert") - } - - gasUsed += receipt.GasUsed - } - coinbaseBalanceAfter := tmpEnvDiff.state.GetBalance(coinbase) - coinbaseBalanceDelta := new(big.Int).Sub(coinbaseBalanceAfter, coinbaseBalanceBefore) - tmpEnvDiff.newProfit.Add(profitBefore, coinbaseBalanceDelta) - - bundleProfit := coinbaseBalanceDelta - - bundleActualEffGP := bundleProfit.Div(bundleProfit, big.NewInt(int64(gasUsed))) - bundleSimEffGP := new(big.Int).Set(bundle.MevGasPrice) - - // allow >-1% divergence - actualEGP := new(big.Int).Mul(bundleActualEffGP, common.Big100) // bundle actual effective gas price * 100 - simulatedEGP := new(big.Int).Mul(bundleSimEffGP, big.NewInt(99)) // bundle simulated effective gas price * 99 - - if simulatedEGP.Cmp(actualEGP) > 0 { - log.Trace("Bundle underpays after inclusion", "bundle", bundle.OriginalBundle.Hash) - return &lowProfitError{ - ExpectedEffectiveGasPrice: bundleSimEffGP, - ActualEffectiveGasPrice: bundleActualEffGP, - } - } - - if algoConf.EnforceProfit { - // if profit is enforced between simulation and actual commit, only allow ProfitThresholdPercent divergence - simulatedBundleProfit := new(big.Int).Set(bundle.TotalEth) - actualBundleProfit := new(big.Int).Mul(bundleActualEffGP, big.NewInt(int64(gasUsed))) - - // We want to make simulated profit smaller to allow for some leeway in cases where the actual profit is - // lower due to transaction ordering - simulatedProfitMultiple := new(big.Int).Mul(simulatedBundleProfit, algoConf.ProfitThresholdPercent) - actualProfitMultiple := new(big.Int).Mul(actualBundleProfit, common.Big100) - - if simulatedProfitMultiple.Cmp(actualProfitMultiple) > 0 { - log.Trace("Lower bundle profit found after inclusion", "bundle", bundle.OriginalBundle.Hash) - return &lowProfitError{ - ExpectedProfit: simulatedBundleProfit, - ActualProfit: actualBundleProfit, - } - } - } - - *envDiff = *tmpEnvDiff - return nil -} - func estimatePayoutTxGas(env *environment, sender, receiver common.Address, prv *ecdsa.PrivateKey, chData chainData) (uint64, bool, error) { if codeHash := env.state.GetCodeHash(receiver); codeHash == (common.Hash{}) || codeHash == emptyCodeHash { return params.TxGas, true, nil @@ -390,6 +211,43 @@ func applyPayoutTx(envDiff *environmentDiff, sender, receiver common.Address, ga return rec, nil } +func commitPayoutTx(parameters PayoutTransactionParams) (*types.Receipt, error) { + if parameters.Gas < params.TxGas { + return nil, errors.New("not enough gas for intrinsic gas cost") + } + + requiredBalance := new(big.Int).Mul(parameters.BaseFee, new(big.Int).SetUint64(parameters.Gas)) + requiredBalance = requiredBalance.Add(requiredBalance, parameters.Amount) + if requiredBalance.Cmp(parameters.SenderBalance) > 0 { + return nil, 
errors.New("not enough balance") + } + + tx, err := types.SignNewTx(parameters.PrivateKey, parameters.Signer, &types.DynamicFeeTx{ + ChainID: parameters.ChainData.chainConfig.ChainID, + Nonce: parameters.SenderNonce, + GasTipCap: new(big.Int), + GasFeeCap: parameters.BaseFee, + Gas: parameters.Gas, + To: ¶meters.Receiver, + Value: parameters.Amount, + }) + if err != nil { + return nil, err + } + + txSender, err := types.Sender(parameters.Signer, tx) + if err != nil { + return nil, err + } + + if txSender != parameters.Sender { + return nil, errors.New("incorrect sender private key") + } + + receipt, _, err := parameters.CommitFn(tx, parameters.ChainData) + return receipt, err +} + func insertPayoutTx(env *environment, sender, receiver common.Address, gas uint64, isEOA bool, availableFunds *big.Int, prv *ecdsa.PrivateKey, chData chainData) (*types.Receipt, error) { if isEOA { diff := newEnvironmentDiff(env) @@ -433,208 +291,90 @@ func insertPayoutTx(env *environment, sender, receiver common.Address, gas uint6 return nil, err } -func (envDiff *environmentDiff) commitPayoutTx(amount *big.Int, sender, receiver common.Address, gas uint64, prv *ecdsa.PrivateKey, chData chainData) (*types.Receipt, error) { - senderBalance := envDiff.state.GetBalance(sender) - - if gas < params.TxGas { - return nil, errors.New("not enough gas for intrinsic gas cost") - } - - requiredBalance := new(big.Int).Mul(envDiff.header.BaseFee, new(big.Int).SetUint64(gas)) - requiredBalance = requiredBalance.Add(requiredBalance, amount) - if requiredBalance.Cmp(senderBalance) > 0 { - return nil, errors.New("not enough balance") - } - - signer := envDiff.baseEnvironment.signer - tx, err := types.SignNewTx(prv, signer, &types.DynamicFeeTx{ - ChainID: chData.chainConfig.ChainID, - Nonce: envDiff.state.GetNonce(sender), - GasTipCap: new(big.Int), - GasFeeCap: envDiff.header.BaseFee, - Gas: gas, - To: &receiver, - Value: amount, - }) - if err != nil { - return nil, err - } - - txSender, err := types.Sender(signer, tx) - if err != nil { - return nil, err - } - if txSender != sender { - return nil, errors.New("incorrect sender private key") - } - - receipt, _, err := envDiff.commitTx(tx, chData) - if err != nil { - return nil, err - } - - return receipt, nil -} - -func (envDiff *environmentDiff) commitSBundle(b *types.SimSBundle, chData chainData, interrupt *int32, key *ecdsa.PrivateKey, algoConf algorithmConfig) error { - if key == nil { - return errors.New("no private key provided") - } - - tmpEnvDiff := envDiff.copy() - - coinbaseBefore := tmpEnvDiff.state.GetBalance(tmpEnvDiff.header.Coinbase) - gasBefore := tmpEnvDiff.gasPool.Gas() - - if err := tmpEnvDiff.commitSBundleInner(b.Bundle, chData, interrupt, key); err != nil { - return err - } - - coinbaseAfter := tmpEnvDiff.state.GetBalance(tmpEnvDiff.header.Coinbase) - gasAfter := tmpEnvDiff.gasPool.Gas() +// BuildMultiTxSnapBlock attempts to build a block with input orders using state.MultiTxSnapshot. If a failure occurs attempting to commit a given order, +// it reverts to previous state and the next order is attempted. 
+func BuildMultiTxSnapBlock( + inputEnvironment *environment, + key *ecdsa.PrivateKey, + chData chainData, + algoConf algorithmConfig, + orders *types.TransactionsByPriceAndNonce) ([]types.SimulatedBundle, []types.UsedSBundle, error) { - coinbaseDelta := new(big.Int).Sub(coinbaseAfter, coinbaseBefore) - gasDelta := new(big.Int).SetUint64(gasBefore - gasAfter) - - if coinbaseDelta.Cmp(common.Big0) < 0 { - return errors.New("coinbase balance decreased") - } - - gotEGP := new(big.Int).Div(coinbaseDelta, gasDelta) - simEGP := new(big.Int).Set(b.MevGasPrice) - - // allow > 1% difference - actualEGP := new(big.Int).Mul(gotEGP, big.NewInt(101)) - simulatedEGP := new(big.Int).Mul(simEGP, common.Big100) + var ( + usedBundles []types.SimulatedBundle + usedSbundles []types.UsedSBundle + orderFailed = false + buildBlockErrors []error + ) - if simulatedEGP.Cmp(actualEGP) > 0 { - return &lowProfitError{ - ExpectedEffectiveGasPrice: simEGP, - ActualEffectiveGasPrice: gotEGP, + for { + order := orders.Peek() + if order == nil { + break } - } - if algoConf.EnforceProfit { - // if profit is enforced between simulation and actual commit, only allow >-1% divergence - simulatedSbundleProfit := new(big.Int).Set(b.Profit) - actualSbundleProfit := new(big.Int).Set(coinbaseDelta) - - // We want to make simulated profit smaller to allow for some leeway in cases where the actual profit is - // lower due to transaction ordering - simulatedProfitMultiple := new(big.Int).Mul(simulatedSbundleProfit, algoConf.ProfitThresholdPercent) - actualProfitMultiple := new(big.Int).Mul(actualSbundleProfit, common.Big100) - - if simulatedProfitMultiple.Cmp(actualProfitMultiple) > 0 { - log.Trace("Lower sbundle profit found after inclusion", "sbundle", b.Bundle.Hash()) - return &lowProfitError{ - ExpectedProfit: simulatedSbundleProfit, - ActualProfit: actualSbundleProfit, - } + orderFailed = false + changes, err := newEnvChanges(inputEnvironment) + // if changes cannot be instantiated, return early + if err != nil { + log.Error("Failed to create changes", "err", err) + return nil, nil, err } - } - *envDiff = *tmpEnvDiff - return nil -} - -func (envDiff *environmentDiff) commitSBundleInner(b *types.SBundle, chData chainData, interrupt *int32, key *ecdsa.PrivateKey) error { - // check inclusion - minBlock := b.Inclusion.BlockNumber - maxBlock := b.Inclusion.MaxBlockNumber - if current := envDiff.header.Number.Uint64(); current < minBlock || current > maxBlock { - return fmt.Errorf("bundle inclusion block number out of range: %d <= %d <= %d", minBlock, current, maxBlock) - } - - // extract constraints into convenient format - refundIdx := make([]bool, len(b.Body)) - refundPercents := make([]int, len(b.Body)) - for _, el := range b.Validity.Refund { - refundIdx[el.BodyIdx] = true - refundPercents[el.BodyIdx] = el.Percent - } - - var ( - totalProfit *big.Int = new(big.Int) - refundableProfit *big.Int = new(big.Int) - ) - - var ( - coinbaseDelta = new(big.Int) - coinbaseBefore *big.Int - ) - // insert body and check it - for i, el := range b.Body { - coinbaseDelta.Set(common.Big0) - coinbaseBefore = envDiff.state.GetBalance(envDiff.header.Coinbase) + // TODO: add support for retry logic + if tx := order.Tx(); tx != nil { + _, skip, err := changes.commitTx(tx, chData) + switch skip { + case shiftTx: + orders.Shift() + case popTx: + orders.Pop() + } - if el.Tx != nil { - receipt, _, err := envDiff.commitTx(el.Tx, chData) if err != nil { - return err + buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to commit tx: %w", err)) + 
orderFailed = true } - if receipt.Status != types.ReceiptStatusSuccessful && !el.CanRevert { - return errors.New("tx failed") + } else if bundle := order.Bundle(); bundle != nil { + err = changes.commitBundle(bundle, chData) + orders.Pop() + if err != nil { + buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to commit bundle: %w", err)) + orderFailed = true + } else { + usedBundles = append(usedBundles, *bundle) } - } else if el.Bundle != nil { - err := envDiff.commitSBundleInner(el.Bundle, chData, interrupt, key) + } else if sbundle := order.SBundle(); sbundle != nil { + usedEntry := types.UsedSBundle{ + Bundle: sbundle.Bundle, + } + err = changes.CommitSBundle(sbundle, chData, key, algoConf) if err != nil { - return err + buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to commit sbundle: %w", err)) + orderFailed = true + usedEntry.Success = false + } else { + usedEntry.Success = true } + usedSbundles = append(usedSbundles, usedEntry) } else { - return errors.New("invalid body element") - } - - coinbaseDelta.Set(envDiff.state.GetBalance(envDiff.header.Coinbase)) - coinbaseDelta.Sub(coinbaseDelta, coinbaseBefore) - - totalProfit.Add(totalProfit, coinbaseDelta) - if !refundIdx[i] { - refundableProfit.Add(refundableProfit, coinbaseDelta) - } - } - - // enforce constraints - coinbaseDelta.Set(common.Big0) - coinbaseBefore = envDiff.state.GetBalance(envDiff.header.Coinbase) - for i, el := range refundPercents { - if !refundIdx[i] { - continue - } - refundConfig, err := types.GetRefundConfig(&b.Body[i], envDiff.baseEnvironment.signer) - if err != nil { - return err - } - - maxPayoutCost := new(big.Int).Set(core.SbundlePayoutMaxCost) - maxPayoutCost.Mul(maxPayoutCost, big.NewInt(int64(len(refundConfig)))) - maxPayoutCost.Mul(maxPayoutCost, envDiff.header.BaseFee) - - allocatedValue := common.PercentOf(refundableProfit, el) - allocatedValue.Sub(allocatedValue, maxPayoutCost) - - if allocatedValue.Cmp(common.Big0) < 0 { - return fmt.Errorf("negative payout") + // note: this should never happen because we should not be inserting invalid transaction types into + // the orders heap + panic("unsupported order type found") } - for _, refund := range refundConfig { - refundValue := common.PercentOf(allocatedValue, refund.Percent) - refundReceiver := refund.Address - rec, err := envDiff.commitPayoutTx(refundValue, envDiff.header.Coinbase, refundReceiver, core.SbundlePayoutMaxCostInt, key, chData) - if err != nil { - return err + if orderFailed { + if err = changes.revert(); err != nil { + log.Error("Failed to revert changes with multi-transaction snapshot", "err", err) + buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to revert changes: %w", err)) } - if rec.Status != types.ReceiptStatusSuccessful { - return fmt.Errorf("refund tx failed") + } else { + if err = changes.apply(); err != nil { + log.Error("Failed to apply changes with multi-transaction snapshot", "err", err) + buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to apply changes: %w", err)) } - log.Trace("Committed kickback", "payout", ethIntToFloat(allocatedValue), "receiver", refundReceiver) } } - coinbaseDelta.Set(envDiff.state.GetBalance(envDiff.header.Coinbase)) - coinbaseDelta.Sub(coinbaseDelta, coinbaseBefore) - totalProfit.Add(totalProfit, coinbaseDelta) - if totalProfit.Cmp(common.Big0) < 0 { - return fmt.Errorf("negative profit") - } - return nil + return usedBundles, usedSbundles, errors.Join(buildBlockErrors...) 
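Editor's note: errors.Join (available since Go 1.20) returns nil when buildBlockErrors is empty, so the joined error is non-nil only if at least one order failed to commit, revert or apply; the greedy builders wired up below only log that error at debug level and still use the returned bundle lists.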
} diff --git a/miner/algo_greedy.go b/miner/algo_greedy.go index 508f4f6d52..a2954bcc37 100644 --- a/miner/algo_greedy.go +++ b/miner/algo_greedy.go @@ -20,18 +20,59 @@ type greedyBuilder struct { chainData chainData builderKey *ecdsa.PrivateKey interrupt *int32 + buildBlockFunc BuildBlockFunc + algoConf algorithmConfig } func newGreedyBuilder( - chain *core.BlockChain, chainConfig *params.ChainConfig, + chain *core.BlockChain, chainConfig *params.ChainConfig, algoConf *algorithmConfig, blacklist map[common.Address]struct{}, env *environment, key *ecdsa.PrivateKey, interrupt *int32, -) *greedyBuilder { - return &greedyBuilder{ +) (*greedyBuilder, error) { + + if algoConf == nil { + return nil, errNoAlgorithmConfig + } + + builder := &greedyBuilder{ inputEnvironment: env, - chainData: chainData{chainConfig, chain, blacklist}, + chainData: chainData{chainConfig: chainConfig, chain: chain, blacklist: blacklist}, builderKey: key, interrupt: interrupt, + algoConf: *algoConf, } + // Initialize block builder function + var buildBlockFunc BuildBlockFunc + if algoConf.EnableMultiTxSnap { + buildBlockFunc = func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + orders := types.NewTransactionsByPriceAndNonce(builder.inputEnvironment.signer, transactions, + simBundles, simSBundles, builder.inputEnvironment.header.BaseFee) + + usedBundles, usedSbundles, err := BuildMultiTxSnapBlock( + builder.inputEnvironment, + builder.builderKey, + builder.chainData, + builder.algoConf, + orders, + ) + if err != nil { + log.Debug("Error(s) building multi-tx snapshot block", "err", err) + } + return builder.inputEnvironment, usedBundles, usedSbundles + } + } else { + buildBlockFunc = func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + orders := types.NewTransactionsByPriceAndNonce(builder.inputEnvironment.signer, transactions, + simBundles, simSBundles, builder.inputEnvironment.header.BaseFee) + + envDiff := newEnvironmentDiff(builder.inputEnvironment.copy()) + usedBundles, usedSbundles := builder.mergeOrdersIntoEnvDiff(envDiff, orders) + envDiff.applyToBaseEnv() + return envDiff.baseEnvironment, usedBundles, usedSbundles + } + } + + builder.buildBlockFunc = buildBlockFunc + return builder, nil } func (b *greedyBuilder) mergeOrdersIntoEnvDiff( diff --git a/miner/algo_greedy_buckets.go b/miner/algo_greedy_buckets.go index 1a305c7f8b..c03492a5d1 100644 --- a/miner/algo_greedy_buckets.go +++ b/miner/algo_greedy_buckets.go @@ -13,8 +13,6 @@ import ( "github.com/ethereum/go-ethereum/params" ) -var errNoAlgorithmConfig = errors.New("no algorithm config specified") - // / To use it: // / 1. Copy relevant data from the worker // / 2. 
Call buildBlock @@ -46,6 +44,8 @@ func newGreedyBucketsBuilder( gasUsedMap: make(map[*types.TxWithMinerFee]uint64), algoConf: *algoConf, } + + // Initialize block builder function var buildBlockFunc BuildBlockFunc if algoConf.EnableMultiTxSnap { buildBlockFunc = func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, @@ -54,7 +54,16 @@ func newGreedyBucketsBuilder( orders := types.NewTransactionsByPriceAndNonce(builder.inputEnvironment.signer, transactions, simBundles, simSBundles, builder.inputEnvironment.header.BaseFee) - usedBundles, usedSbundles := builder.buildMultiTxSnapBlock(orders) + usedBundles, usedSbundles, err := BuildMultiTxSnapBlock( + builder.inputEnvironment, + builder.builderKey, + builder.chainData, + builder.algoConf, + orders, + ) + if err != nil { + log.Debug("Error(s) building multi-tx snapshot block", "err", err) + } return builder.inputEnvironment, usedBundles, usedSbundles } } else { @@ -275,66 +284,3 @@ func (b *greedyBucketsBuilder) buildBlock(simBundles []types.SimulatedBundle, si return b.buildBlockFunc(simBundles, simSBundles, transactions) } - -func (b *greedyBucketsBuilder) buildMultiTxSnapBlock(orders *types.TransactionsByPriceAndNonce) ([]types.SimulatedBundle, []types.UsedSBundle) { - var ( - usedBundles []types.SimulatedBundle - usedSbundles []types.UsedSBundle - orderFailed = false - ) - - for { - order := orders.Peek() - if order == nil { - break - } - - orderFailed = false - changes, err := newOrderApplyChanges(b.inputEnvironment) - if err != nil { - log.Error("Failed to create changes", "err", err) - return usedBundles, usedSbundles - } - - if tx := order.Tx(); tx != nil { - _, skip, err := changes.commitTx(tx, b.chainData) - switch skip { - case shiftTx: - orders.Shift() - case popTx: - orders.Pop() - } - - if err != nil { - log.Trace("Failed to commit tx with multi-transaction snapshot", "hash", tx.Hash(), "err", err) - orderFailed = true - } - } else if bundle := order.Bundle(); bundle != nil { - err = changes.commitBundle(bundle, b.chainData) - orders.Pop() - if err != nil { - log.Trace("Failed to commit bundle with multi-transaction snapshot", "bundle", bundle.OriginalBundle.Hash, "err", err) - orderFailed = true - } else { - usedBundles = append(usedBundles, *bundle) - } - } else if sbundle := order.SBundle(); sbundle != nil { - - } else { - // note: this should never happen because we should not be inserting invalid transaction types into - // the orders heap - panic("unsupported order type found") - } - - if orderFailed { - if err = changes.revert(); err != nil { - log.Error("Failed to revert changes with multi-transaction snapshot", "err", err) - } - } else { - if err = changes.apply(); err != nil { - log.Error("Failed to apply changes with multi-transaction snapshot", "err", err) - } - } - } - return usedBundles, usedSbundles -} diff --git a/miner/algo_greedy_test.go b/miner/algo_greedy_test.go index 49bb59fe14..4b460f0adc 100644 --- a/miner/algo_greedy_test.go +++ b/miner/algo_greedy_test.go @@ -36,7 +36,10 @@ func TestBuildBlockGasLimit(t *testing.T) { } result, _, _ = builder.buildBlock([]types.SimulatedBundle{}, nil, txs) case ALGO_GREEDY: - builder := newGreedyBuilder(chData.chain, chData.chainConfig, nil, env, nil, nil) + builder, err := newGreedyBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) + if err != nil { + t.Fatalf("Error creating greedy builder: %v", err) + } result, _, _ = builder.buildBlock([]types.SimulatedBundle{}, nil, txs) } diff --git a/miner/algo_test.go 
b/miner/algo_test.go index 0620ea37e4..d432e15b4a 100644 --- a/miner/algo_test.go +++ b/miner/algo_test.go @@ -235,7 +235,10 @@ func runAlgoTest(algo AlgoType, config *params.ChainConfig, alloc core.GenesisAl } resultEnv, _, _ = builder.buildBlock(bundles, nil, txPool) case ALGO_GREEDY: - builder := newGreedyBuilder(chData.chain, chData.chainConfig, nil, env, nil, nil) + builder, err := newGreedyBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) + if err != nil { + return nil, err + } resultEnv, _, _ = builder.buildBlock(bundles, nil, txPool) } return resultEnv.profit, nil diff --git a/miner/env_changes.go b/miner/env_changes.go new file mode 100644 index 0000000000..85baaade32 --- /dev/null +++ b/miner/env_changes.go @@ -0,0 +1,418 @@ +package miner + +import ( + "crypto/ecdsa" + "errors" + "fmt" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/tracers/logger" + "github.com/ethereum/go-ethereum/log" + "math/big" +) + +// envChanges is a helper struct to apply and revert changes to the environment +type envChanges struct { + env *environment + gasPool *core.GasPool + usedGas uint64 + profit *big.Int + txs []*types.Transaction + receipts []*types.Receipt +} + +func newEnvChanges(env *environment) (*envChanges, error) { + if err := env.state.MultiTxSnapshot(); err != nil { + return nil, err + } + + return &envChanges{ + env: env, + gasPool: new(core.GasPool).AddGas(env.gasPool.Gas()), + usedGas: env.header.GasUsed, + profit: new(big.Int).Set(env.profit), + txs: make([]*types.Transaction, 0), + receipts: make([]*types.Receipt, 0), + }, nil +} + +func (c *envChanges) commitPayoutTx( + amount *big.Int, sender, receiver common.Address, + gas uint64, prv *ecdsa.PrivateKey, chData chainData) (*types.Receipt, error) { + return commitPayoutTx(PayoutTransactionParams{ + Amount: amount, + BaseFee: c.env.header.BaseFee, + ChainData: chData, + Gas: gas, + CommitFn: c.commitTx, + Receiver: receiver, + Sender: sender, + SenderBalance: c.env.state.GetBalance(sender), + SenderNonce: c.env.state.GetNonce(sender), + Signer: c.env.signer, + PrivateKey: prv, + }) +} + +func (c *envChanges) commitTx(tx *types.Transaction, chData chainData) (*types.Receipt, int, error) { + var ( + gasPoolBefore = new(core.GasPool).AddGas(c.gasPool.Gas()) + usedGasBefore = c.usedGas + txsBefore = c.txs[:] + receiptsBefore = c.receipts[:] + profitBefore = new(big.Int).Set(c.profit) + ) + signer := c.env.signer + sender, err := types.Sender(signer, tx) + if err != nil { + return nil, popTx, err + } + + gasPrice, err := tx.EffectiveGasTip(c.env.header.BaseFee) + if err != nil { + return nil, shiftTx, err + } + + if _, in := chData.blacklist[sender]; in { + return nil, popTx, errors.New("blacklist violation, tx.sender") + } + + if to := tx.To(); to != nil { + if _, in := chData.blacklist[*to]; in { + return nil, popTx, errors.New("blacklist violation, tx.to") + } + } + + cfg := *chData.chain.GetVMConfig() + touchTracer := logger.NewAccountTouchTracer() + cfg.Tracer = touchTracer + cfg.Debug = true + + c.env.state.SetTxContext(tx.Hash(), c.env.tcount+len(c.txs)) + receipt, err := core.ApplyTransaction(chData.chainConfig, chData.chain, &c.env.coinbase, c.gasPool, c.env.state, c.env.header, tx, &c.usedGas, cfg, nil) + if err != nil { + c.rollback(usedGasBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + + switch { + case errors.Is(err, core.ErrGasLimitReached): + // 
Pop the current out-of-gas transaction without shifting in the next from the account + from, _ := types.Sender(signer, tx) + log.Trace("Gas limit exceeded for current block", "sender", from) + return receipt, popTx, err + + case errors.Is(err, core.ErrNonceTooLow): + // New head notification data race between the transaction pool and miner, shift + from, _ := types.Sender(signer, tx) + log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) + return receipt, shiftTx, err + + case errors.Is(err, core.ErrNonceTooHigh): + // Reorg notification data race between the transaction pool and miner, skip account = + from, _ := types.Sender(signer, tx) + log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) + return receipt, popTx, err + + case errors.Is(err, core.ErrTxTypeNotSupported): + // Pop the unsupported transaction without shifting in the next from the account + from, _ := types.Sender(signer, tx) + log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) + return receipt, popTx, err + + default: + // Strange error, discard the transaction and get the next in line (note, the + // nonce-too-high clause will prevent us from executing in vain). + log.Trace("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) + return receipt, shiftTx, err + } + } + + for _, address := range touchTracer.TouchedAddresses() { + if _, in := chData.blacklist[address]; in { + c.rollback(usedGasBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return nil, popTx, errors.New("blacklist violation, tx trace") + } + } + + c.profit.Add(c.profit, new(big.Int).Mul(new(big.Int).SetUint64(receipt.GasUsed), gasPrice)) + c.txs = append(c.txs, tx) + c.receipts = append(c.receipts, receipt) + + return receipt, shiftTx, nil +} + +func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainData) error { + var ( + profitBefore = new(big.Int).Set(c.profit) + coinbaseBefore = new(big.Int).Set(c.env.state.GetBalance(c.env.coinbase)) + gasUsedBefore = c.usedGas + gasPoolBefore = new(core.GasPool).AddGas(c.gasPool.Gas()) + txsBefore = c.txs[:] + receiptsBefore = c.receipts[:] + hasBaseFee = c.env.header.BaseFee != nil + + bundleErr error + ) + + for _, tx := range bundle.OriginalBundle.Txs { + if hasBaseFee && tx.Type() == types.DynamicFeeTxType { + // Sanity check for extremely large numbers + if tx.GasFeeCap().BitLen() > 256 { + bundleErr = core.ErrFeeCapVeryHigh + break + } + if tx.GasTipCap().BitLen() > 256 { + bundleErr = core.ErrTipVeryHigh + break + } + + // Ensure gasFeeCap is greater than or equal to gasTipCap. 
+ if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + bundleErr = core.ErrTipAboveFeeCap + break + } + } + receipt, _, err := c.commitTx(tx, chData) + + if err != nil { + log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) + bundleErr = err + break + } + + if receipt.Status != types.ReceiptStatusSuccessful && !bundle.OriginalBundle.RevertingHash(tx.Hash()) { + log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) + bundleErr = errors.New("bundle tx revert") + break + } + } + + if bundleErr != nil { + c.rollback(gasUsedBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return bundleErr + } + + var ( + bundleProfit = new(big.Int).Sub(c.env.state.GetBalance(c.env.coinbase), coinbaseBefore) + gasUsed = c.usedGas - gasUsedBefore + + effGP = new(big.Int).Div(bundleProfit, new(big.Int).SetUint64(gasUsed)) + simEffGP = new(big.Int).Set(bundle.MevGasPrice) + ) + + // allow >-1% divergence + effGP.Mul(effGP, common.Big100) + simEffGP.Mul(simEffGP, big.NewInt(99)) + if simEffGP.Cmp(effGP) > 0 { + log.Trace("Bundle underpays after inclusion", "bundle", bundle.OriginalBundle.Hash) + c.rollback(gasUsedBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return errors.New("bundle underpays") + } + + c.profit.Add(profitBefore, bundleProfit) + return nil +} + +func (c *envChanges) CommitSBundle(sbundle *types.SimSBundle, chData chainData, key *ecdsa.PrivateKey, algoConf algorithmConfig) error { + if key == nil { + return errNoPrivateKey + } + + var ( + coinbaseBefore = new(big.Int).Set(c.env.state.GetBalance(c.env.coinbase)) + gasPoolBefore = new(core.GasPool).AddGas(c.gasPool.Gas()) + gasBefore = c.usedGas + txsBefore = c.txs[:] + receiptsBefore = c.receipts[:] + profitBefore = new(big.Int).Set(c.profit) + ) + + if err := c.commitSBundle(sbundle.Bundle, chData, key, algoConf); err != nil { + c.rollback(gasBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return err + } + + var ( + coinbaseAfter = c.env.state.GetBalance(c.env.header.Coinbase) + gasAfter = c.usedGas + + coinbaseDelta = new(big.Int).Sub(coinbaseAfter, coinbaseBefore) + gasDelta = new(big.Int).SetUint64(gasAfter - gasBefore) + ) + if coinbaseDelta.Cmp(common.Big0) < 0 { + c.rollback(gasBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return errors.New("coinbase balance decreased") + } + + gotEGP := new(big.Int).Div(coinbaseDelta, gasDelta) + simEGP := new(big.Int).Set(sbundle.MevGasPrice) + + // allow > 1% difference + actualEGP := new(big.Int).Mul(gotEGP, common.Big100) + simulatedEGP := new(big.Int).Mul(simEGP, big.NewInt(99)) + + if simulatedEGP.Cmp(actualEGP) > 0 { + c.rollback(gasBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return &lowProfitError{ + ExpectedEffectiveGasPrice: simEGP, + ActualEffectiveGasPrice: gotEGP, + } + } + + if algoConf.EnforceProfit { + // if profit is enforced between simulation and actual commit, only allow >-1% divergence + simulatedProfit := new(big.Int).Set(sbundle.Profit) + actualProfit := new(big.Int).Set(coinbaseDelta) + + // We want to make simulated profit smaller to allow for some leeway in cases where the actual profit is + // lower due to transaction ordering + simulatedProfitMultiple := new(big.Int).Mul(simulatedProfit, algoConf.ProfitThresholdPercent) + actualProfitMultiple := new(big.Int).Mul(actualProfit, common.Big100) + + if simulatedProfitMultiple.Cmp(actualProfitMultiple) > 0 { + log.Trace("Lower sbundle profit found after inclusion", 
"sbundle", sbundle.Bundle.Hash()) + c.rollback(gasBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return &lowProfitError{ + ExpectedProfit: simulatedProfit, + ActualProfit: actualProfit, + } + } + } + + return nil +} + +func (c *envChanges) commitSBundle(sbundle *types.SBundle, chData chainData, key *ecdsa.PrivateKey, algoConf algorithmConfig) error { + + var ( + // check inclusion + minBlock = sbundle.Inclusion.BlockNumber + maxBlock = sbundle.Inclusion.MaxBlockNumber + ) + if current := c.env.header.Number.Uint64(); current < minBlock || current > maxBlock { + return fmt.Errorf("bundle inclusion block number out of range: %d <= %d <= %d", minBlock, current, maxBlock) + } + + var ( + // extract constraints into convenient format + refundIdx = make([]bool, len(sbundle.Body)) + refundPercents = make([]int, len(sbundle.Body)) + ) + for _, el := range sbundle.Validity.Refund { + refundIdx[el.BodyIdx] = true + refundPercents[el.BodyIdx] = el.Percent + } + + var ( + totalProfit *big.Int = new(big.Int) + refundableProfit *big.Int = new(big.Int) + + coinbaseDelta = new(big.Int) + coinbaseBefore *big.Int + ) + + // insert body and check it + for i, el := range sbundle.Body { + coinbaseDelta.Set(common.Big0) + coinbaseBefore = c.env.state.GetBalance(c.env.coinbase) + + if el.Tx != nil { + receipt, _, err := c.commitTx(el.Tx, chData) + if err != nil { + return err + } + if receipt.Status != types.ReceiptStatusSuccessful && !el.CanRevert { + return errors.New("tx failed") + } + } else if el.Bundle != nil { + err := c.commitSBundle(el.Bundle, chData, key, algoConf) + if err != nil { + return err + } + } else { + return errors.New("invalid body element") + } + + coinbaseDelta.Set(c.env.state.GetBalance(c.env.coinbase)) + coinbaseDelta.Sub(coinbaseDelta, coinbaseBefore) + + totalProfit.Add(totalProfit, coinbaseDelta) + if !refundIdx[i] { + refundableProfit.Add(refundableProfit, coinbaseDelta) + } + } + + // enforce constraints + coinbaseDelta.Set(common.Big0) + coinbaseBefore = c.env.state.GetBalance(c.env.header.Coinbase) + for i, el := range refundPercents { + if !refundIdx[i] { + continue + } + refundConfig, err := types.GetRefundConfig(&sbundle.Body[i], c.env.signer) + if err != nil { + return err + } + + maxPayoutCost := new(big.Int).Set(core.SbundlePayoutMaxCost) + maxPayoutCost.Mul(maxPayoutCost, big.NewInt(int64(len(refundConfig)))) + maxPayoutCost.Mul(maxPayoutCost, c.env.header.BaseFee) + + allocatedValue := common.PercentOf(refundableProfit, el) + allocatedValue.Sub(allocatedValue, maxPayoutCost) + + if allocatedValue.Cmp(common.Big0) < 0 { + return fmt.Errorf("negative payout") + } + + for _, refund := range refundConfig { + refundValue := common.PercentOf(allocatedValue, refund.Percent) + refundReceiver := refund.Address + rec, err := c.commitPayoutTx(refundValue, c.env.header.Coinbase, refundReceiver, core.SbundlePayoutMaxCostInt, key, chData) + if err != nil { + return err + } + if rec.Status != types.ReceiptStatusSuccessful { + return fmt.Errorf("refund tx failed") + } + log.Trace("Committed kickback", "payout", ethIntToFloat(allocatedValue), "receiver", refundReceiver) + } + } + coinbaseDelta.Set(c.env.state.GetBalance(c.env.header.Coinbase)) + coinbaseDelta.Sub(coinbaseDelta, coinbaseBefore) + totalProfit.Add(totalProfit, coinbaseDelta) + + if totalProfit.Cmp(common.Big0) < 0 { + return fmt.Errorf("negative profit") + } + return nil +} + +// revert reverts all changes to the environment - every commit operation must be followed by a revert or apply operation +func (c 
*envChanges) revert() error {
+	return c.env.state.MultiTxSnapshotRevert()
+}
+
+func (c *envChanges) rollback(
+	gasUsedBefore uint64, gasPoolBefore *core.GasPool, profitBefore *big.Int,
+	txsBefore []*types.Transaction, receiptsBefore []*types.Receipt) {
+
+	c.usedGas = gasUsedBefore
+	c.gasPool = gasPoolBefore
+	c.profit = profitBefore
+	c.txs = txsBefore
+	c.receipts = receiptsBefore
+}
+
+func (c *envChanges) apply() error {
+	if err := c.env.state.MultiTxSnapshotDiscard(); err != nil {
+		return err
+	}
+
+	c.env.gasPool.SetGas(c.gasPool.Gas())
+	c.env.header.GasUsed = c.usedGas
+	c.env.profit.Set(c.profit)
+	c.env.tcount += len(c.txs)
+	c.env.txs = append(c.env.txs, c.txs...)
+	c.env.receipts = append(c.env.receipts, c.receipts...)
+	return nil
+}
diff --git a/miner/env_changes_test.go b/miner/env_changes_test.go
new file mode 100644
index 0000000000..82ba8bd913
--- /dev/null
+++ b/miner/env_changes_test.go
@@ -0,0 +1,384 @@
+package miner
+
+import (
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/stretchr/testify/require"
+	"math/big"
+	"testing"
+)
+
+func TestTxCommitSnaps(t *testing.T) {
+	statedb, chData, signers := genTestSetup()
+
+	env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1))
+	tx := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{})
+
+	changes, err := newEnvChanges(env)
+	if err != nil {
+		t.Fatalf("Error creating changes: %v", err)
+	}
+
+	receipt, i, err := changes.commitTx(tx, chData)
+	if err != nil {
+		t.Fatal("can't commit transaction:", err)
+	}
+	if receipt.Status != 1 {
+		t.Fatal("tx failed", receipt)
+	}
+	if i != shiftTx {
+		t.Fatal("incorrect shift value")
+	}
+
+	if env.tcount != 0 {
+		t.Fatal("env tcount modified")
+	}
+	if len(env.receipts) != 0 {
+		t.Fatal("env receipts modified")
+	}
+	if len(env.txs) != 0 {
+		t.Fatal("env txs modified")
+	}
+	if env.gasPool.Gas() != GasLimit {
+		t.Fatal("env gas pool modified")
+	}
+
+	if changes.gasPool.AddGas(receipt.GasUsed).Gas() != GasLimit {
+		t.Fatal("envDiff gas pool incorrect")
+	}
+	if changes.usedGas != receipt.GasUsed {
+		t.Fatal("envDiff gas used is incorrect")
+	}
+	if len(changes.receipts) != 1 {
+		t.Fatal("envDiff receipts incorrect")
+	}
+	if len(changes.txs) != 1 {
+		t.Fatal("envDiff txs incorrect")
+	}
+}
+func TestBundleCommitSnaps(t *testing.T) {
+	statedb, chData, signers := genTestSetup()
+
+	env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1))
+	changes, err := newEnvChanges(env)
+	if err != nil {
+		t.Fatal("can't create env changes", err)
+	}
+
+	tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{})
+	tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{})
+
+	bundle := types.MevBundle{
+		Txs:         types.Transactions{tx1, tx2},
+		BlockNumber: env.header.Number,
+	}
+
+	simBundle, err := simulateBundle(env, bundle, chData, nil)
+	if err != nil {
+		t.Fatal("Failed to simulate bundle", err)
+	}
+
+	err = changes.commitBundle(&simBundle, chData)
+	if err != nil {
+		t.Fatal("Failed to commit bundle", err)
+	}
+
+	if len(changes.txs) != 2 {
+		t.Fatal("Incorrect new txs")
+	}
+	if len(changes.receipts) != 2 {
+		t.Fatal("Incorrect receipts txs")
+	}
+	if changes.gasPool.AddGas(21000*2).Gas() != GasLimit {
+		t.Fatal("Gas pool incorrect update")
+	}
+}
+
+func TestErrorTxCommitSnaps(t *testing.T) {
+	statedb, chData, signers := genTestSetup()
+
+	env := 
newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1))
+	changes, err := newEnvChanges(env)
+	if err != nil {
+		t.Fatal("can't create env changes", err)
+	}
+
+	signers.nonces[1] = 10
+	tx := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{})
+
+	_, i, err := changes.commitTx(tx, chData)
+	if err == nil {
+		t.Fatal("committed incorrect transaction:", err)
+	}
+	if i != popTx {
+		t.Fatal("incorrect shift value")
+	}
+
+	if changes.gasPool.Gas() != GasLimit {
+		t.Fatal("envDiff gas pool incorrect")
+	}
+	if changes.usedGas != 0 {
+		t.Fatal("envDiff gas used incorrect")
+	}
+	if changes.profit.Sign() != 0 {
+		t.Fatal("envDiff new profit incorrect")
+	}
+	if len(changes.receipts) != 0 {
+		t.Fatal("envDiff receipts incorrect")
+	}
+	if len(changes.txs) != 0 {
+		t.Fatal("envDiff txs incorrect")
+	}
+}
+
+func TestCommitTxOverGasLimitSnaps(t *testing.T) {
+	statedb, chData, signers := genTestSetup()
+
+	env := newEnvironment(chData, statedb, signers.addresses[0], 21000, big.NewInt(1))
+	changes, err := newEnvChanges(env)
+	if err != nil {
+		t.Fatal("can't create env changes", err)
+	}
+
+	tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{})
+	tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{})
+
+	receipt, i, err := changes.commitTx(tx1, chData)
+	if err != nil {
+		t.Fatal("can't commit transaction:", err)
+	}
+	if receipt.Status != 1 {
+		t.Fatal("tx failed", receipt)
+	}
+	if i != shiftTx {
+		t.Fatal("incorrect shift value")
+	}
+
+	if changes.gasPool.Gas() != 0 {
+		t.Fatal("Env diff gas pool is not drained")
+	}
+
+	_, _, err = changes.commitTx(tx2, chData)
+	require.Error(t, err, "committed tx over gas limit")
+}
+
+func TestErrorBundleCommitSnaps(t *testing.T) {
+	statedb, chData, signers := genTestSetup()
+
+	env := newEnvironment(chData, statedb, signers.addresses[0], 21000*2, big.NewInt(1))
+	changes, err := newEnvChanges(env)
+	if err != nil {
+		t.Fatal("can't create env changes", err)
+	}
+
+	// This tx will be included before bundle so bundle will fail because of gas limit
+	tx0 := signers.signTx(4, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{})
+
+	tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{})
+	tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{})
+
+	bundle := types.MevBundle{
+		Txs:         types.Transactions{tx1, tx2},
+		BlockNumber: env.header.Number,
+	}
+
+	simBundle, err := simulateBundle(env, bundle, chData, nil)
+	if err != nil {
+		t.Fatal("Failed to simulate bundle", err)
+	}
+
+	_, _, err = changes.commitTx(tx0, chData)
+	if err != nil {
+		t.Fatal("Failed to commit tx0", err)
+	}
+
+	gasPoolBefore := *changes.gasPool
+	gasUsedBefore := changes.usedGas
+	newProfitBefore := new(big.Int).Set(changes.profit)
+	balanceBefore := changes.env.state.GetBalance(signers.addresses[2])
+
+	err = changes.commitBundle(&simBundle, chData)
+	if err == nil {
+		t.Fatal("Committed failed bundle", err)
+	}
+
+	if *changes.gasPool != gasPoolBefore {
+		t.Fatalf("gasPool changed [found: %d, expected: %d]", changes.gasPool.Gas(), gasPoolBefore.Gas())
+	}
+
+	if changes.usedGas != gasUsedBefore {
+		t.Fatal("gasUsed changed")
+	}
+
+	balanceAfter := changes.env.state.GetBalance(signers.addresses[2])
+	if balanceAfter.Cmp(balanceBefore) != 0 {
+		t.Fatal("balance
changed") + } + + if changes.profit.Cmp(newProfitBefore) != 0 { + t.Fatal("newProfit changed") + } + + if len(changes.txs) != 1 { + t.Fatal("Incorrect new txs") + } + if len(changes.receipts) != 1 { + t.Fatal("Incorrect receipts txs") + } +} + +func TestErrorSBundleCommitSnaps(t *testing.T) { + statedb, chData, signers := genTestSetup() + + env := newEnvironment(chData, statedb, signers.addresses[0], 21000*2, big.NewInt(1)) + changes, err := newEnvChanges(env) + if err != nil { + t.Fatal("can't create env changes", err) + } + + // This tx will be included before sbundle so sbundle will fail because of gas limit + tx0 := signers.signTx(4, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + sbundle := types.SimSBundle{ + Bundle: &types.SBundle{ + Inclusion: types.BundleInclusion{ + BlockNumber: env.header.Number.Uint64(), + MaxBlockNumber: env.header.Number.Uint64(), + }, + Body: []types.BundleBody{ + { + Tx: tx1, + }, + { + Tx: tx2, + }, + }, + }, + // with such small values this bundle will never be rejected based on insufficient profit + MevGasPrice: big.NewInt(1), + Profit: big.NewInt(1), + } + + _, _, err = changes.commitTx(tx0, chData) + if err != nil { + t.Fatal("Failed to commit tx0", err) + } + + gasPoolBefore := *changes.gasPool + gasUsedBefore := changes.usedGas + newProfitBefore := new(big.Int).Set(changes.profit) + balanceBefore := changes.env.state.GetBalance(signers.addresses[2]) + + err = changes.CommitSBundle(&sbundle, chData, builderPrivKey, defaultAlgorithmConfig) + if err == nil { + t.Fatal("Committed failed bundle", err) + } + + if *changes.gasPool != gasPoolBefore { + t.Fatalf("gasPool changed [found: %d, expected: %d]", changes.gasPool.Gas(), gasPoolBefore.Gas()) + } + + if changes.usedGas != gasUsedBefore { + t.Fatal("gasUsed changed") + } + + balanceAfter := changes.env.state.GetBalance(signers.addresses[2]) + if balanceAfter.Cmp(balanceBefore) != 0 { + t.Fatal("balance changed") + } + + if changes.profit.Cmp(newProfitBefore) != 0 { + t.Fatal("newProfit changed") + } + + if len(changes.txs) != 1 { + t.Fatal("Incorrect new txs") + } + if len(changes.receipts) != 1 { + t.Fatal("Incorrect receipts txs") + } +} + +func TestBlacklistSnaps(t *testing.T) { + statedb, chData, signers := genTestSetup() + + // NOTE: intermediate root hash MUST be generated before env changes are instantiated, otherwise state.MultiTxSnapshot + // will be invalidated and the test will fail + beforeRoot := statedb.IntermediateRoot(true) + + env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) + changes, err := newEnvChanges(env) + if err != nil { + t.Fatal("can't create env changes", err) + } + + blacklist := map[common.Address]struct{}{ + signers.addresses[3]: {}, + } + chData.blacklist = blacklist + + gasPoolBefore := *changes.gasPool + gasUsedBefore := changes.usedGas + balanceBefore := changes.env.state.GetBalance(signers.addresses[3]) + + tx := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[3], big.NewInt(77), []byte{}) + _, _, err = changes.commitTx(tx, chData) + if err == nil { + t.Fatal("committed blacklisted transaction: to") + } + + tx = signers.signTx(3, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[1], big.NewInt(88), []byte{}) + _, _, err = changes.commitTx(tx, 
chData) + if err == nil { + t.Fatal("committed blacklisted transaction: sender") + } + + calldata := make([]byte, 32-20, 20) + calldata = append(calldata, signers.addresses[3].Bytes()...) + + tx = signers.signTx(4, 40000, big.NewInt(0), big.NewInt(1), payProxyAddress, big.NewInt(99), calldata) + _, _, err = changes.commitTx(tx, chData) + t.Log("balance", changes.env.state.GetBalance(signers.addresses[3])) + + if err == nil { + t.Fatal("committed blacklisted transaction: trace") + } + + err = changes.revert() + if err != nil { + t.Fatal("failed reverting changes", err) + } + + if *changes.gasPool != gasPoolBefore { + t.Fatalf("gasPool changed [found: %d, expected: %d]", changes.gasPool.Gas(), gasPoolBefore.Gas()) + } + + if changes.usedGas != gasUsedBefore { + t.Fatal("gasUsed changed") + } + + if changes.profit.Sign() != 0 { + t.Fatal("newProfit changed") + } + + if changes.env.state.GetBalance(signers.addresses[3]).Cmp(balanceBefore) != 0 { + t.Fatalf("blacklisted balance changed [found: %d, expected: %d]", + changes.env.state.GetBalance(signers.addresses[3]), balanceBefore) + } + + if len(changes.txs) != 0 { + t.Fatal("newTxs changed") + } + + if len(changes.receipts) != 0 { + t.Fatal("newReceipts changed") + } + + afterRoot := statedb.IntermediateRoot(true) + if beforeRoot != afterRoot { + t.Fatal("statedb root changed") + } +} diff --git a/miner/environment_diff.go b/miner/environment_diff.go new file mode 100644 index 0000000000..05a6f07140 --- /dev/null +++ b/miner/environment_diff.go @@ -0,0 +1,399 @@ +package miner + +import ( + "crypto/ecdsa" + "errors" + "fmt" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "math/big" +) + +// environmentDiff is a helper struct used to apply transactions to a block using a copy of the state at that block +type environmentDiff struct { + baseEnvironment *environment + header *types.Header + gasPool *core.GasPool // available gas used to pack transactions + state *state.StateDB // apply state changes here + newProfit *big.Int + newTxs []*types.Transaction + newReceipts []*types.Receipt +} + +func newEnvironmentDiff(env *environment) *environmentDiff { + gasPool := new(core.GasPool).AddGas(env.gasPool.Gas()) + return &environmentDiff{ + baseEnvironment: env, + header: types.CopyHeader(env.header), + gasPool: gasPool, + state: env.state.Copy(), + newProfit: new(big.Int), + } +} + +func (envDiff *environmentDiff) copy() *environmentDiff { + gasPool := new(core.GasPool).AddGas(envDiff.gasPool.Gas()) + + return &environmentDiff{ + baseEnvironment: envDiff.baseEnvironment.copy(), + header: types.CopyHeader(envDiff.header), + gasPool: gasPool, + state: envDiff.state.Copy(), + newProfit: new(big.Int).Set(envDiff.newProfit), + newTxs: envDiff.newTxs[:], + newReceipts: envDiff.newReceipts[:], + } +} + +func (envDiff *environmentDiff) applyToBaseEnv() { + env := envDiff.baseEnvironment + env.gasPool = new(core.GasPool).AddGas(envDiff.gasPool.Gas()) + env.header = envDiff.header + env.state.StopPrefetcher() + env.state = envDiff.state + env.profit.Add(env.profit, envDiff.newProfit) + env.tcount += len(envDiff.newTxs) + env.txs = append(env.txs, envDiff.newTxs...) + env.receipts = append(env.receipts, envDiff.newReceipts...) 
+}
+
+// commit tx to envDiff
+func (envDiff *environmentDiff) commitTx(tx *types.Transaction, chData chainData) (*types.Receipt, int, error) {
+	header := envDiff.header
+	coinbase := &envDiff.baseEnvironment.coinbase
+	signer := envDiff.baseEnvironment.signer
+
+	gasPrice, err := tx.EffectiveGasTip(header.BaseFee)
+	if err != nil {
+		return nil, shiftTx, err
+	}
+
+	envDiff.state.SetTxContext(tx.Hash(), envDiff.baseEnvironment.tcount+len(envDiff.newTxs))
+
+	receipt, newState, err := applyTransactionWithBlacklist(signer, chData.chainConfig, chData.chain, coinbase,
+		envDiff.gasPool, envDiff.state, header, tx, &header.GasUsed, *chData.chain.GetVMConfig(), chData.blacklist)
+
+	envDiff.state = newState
+	if err != nil {
+		switch {
+		case errors.Is(err, core.ErrGasLimitReached):
+			// Pop the current out-of-gas transaction without shifting in the next from the account
+			from, _ := types.Sender(signer, tx)
+			log.Trace("Gas limit exceeded for current block", "sender", from)
+			return receipt, popTx, err
+
+		case errors.Is(err, core.ErrNonceTooLow):
+			// New head notification data race between the transaction pool and miner, shift
+			from, _ := types.Sender(signer, tx)
+			log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
+			return receipt, shiftTx, err
+
+		case errors.Is(err, core.ErrNonceTooHigh):
+			// Reorg notification data race between the transaction pool and miner, skip account
+			from, _ := types.Sender(signer, tx)
+			log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
+			return receipt, popTx, err
+
+		case errors.Is(err, core.ErrTxTypeNotSupported):
+			// Pop the unsupported transaction without shifting in the next from the account
+			from, _ := types.Sender(signer, tx)
+			log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
+			return receipt, popTx, err
+
+		default:
+			// Strange error, discard the transaction and get the next in line (note, the
+			// nonce-too-high clause will prevent us from executing in vain).
+			log.Trace("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
+			return receipt, shiftTx, err
+		}
+	}
+
+	envDiff.newProfit = envDiff.newProfit.Add(envDiff.newProfit, gasPrice.Mul(gasPrice, big.NewInt(int64(receipt.GasUsed))))
+	envDiff.newTxs = append(envDiff.newTxs, tx)
+	envDiff.newReceipts = append(envDiff.newReceipts, receipt)
+
+	return receipt, shiftTx, nil
+}
+
+// Commit Bundle to env diff
+func (envDiff *environmentDiff) commitBundle(bundle *types.SimulatedBundle, chData chainData, interrupt *int32, algoConf algorithmConfig) error {
+	coinbase := envDiff.baseEnvironment.coinbase
+	tmpEnvDiff := envDiff.copy()
+
+	coinbaseBalanceBefore := tmpEnvDiff.state.GetBalance(coinbase)
+
+	profitBefore := new(big.Int).Set(tmpEnvDiff.newProfit)
+	var gasUsed uint64
+
+	for _, tx := range bundle.OriginalBundle.Txs {
+		if tmpEnvDiff.header.BaseFee != nil && tx.Type() == types.DynamicFeeTxType {
+			// Sanity check for extremely large numbers
+			if tx.GasFeeCap().BitLen() > 256 {
+				return core.ErrFeeCapVeryHigh
+			}
+			if tx.GasTipCap().BitLen() > 256 {
+				return core.ErrTipVeryHigh
+			}
+			// Ensure gasFeeCap is greater than or equal to gasTipCap.
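+			// An invalid fee cap rejects the whole bundle: bundles are applied atomically on a copied
+			// environment, so the offending tx is not simply skipped.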
+ if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + return core.ErrTipAboveFeeCap + } + } + + if tx.Value().Sign() == -1 { + return core.ErrNegativeValue + } + + _, err := tx.EffectiveGasTip(envDiff.header.BaseFee) + if err != nil { + return err + } + + _, err = types.Sender(envDiff.baseEnvironment.signer, tx) + if err != nil { + return err + } + + if checkInterrupt(interrupt) { + return errInterrupt + } + + receipt, _, err := tmpEnvDiff.commitTx(tx, chData) + + if err != nil { + log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) + return err + } + + if receipt.Status != types.ReceiptStatusSuccessful && !bundle.OriginalBundle.RevertingHash(tx.Hash()) { + log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) + return errors.New("bundle tx revert") + } + + gasUsed += receipt.GasUsed + } + coinbaseBalanceAfter := tmpEnvDiff.state.GetBalance(coinbase) + coinbaseBalanceDelta := new(big.Int).Sub(coinbaseBalanceAfter, coinbaseBalanceBefore) + tmpEnvDiff.newProfit.Add(profitBefore, coinbaseBalanceDelta) + + bundleProfit := coinbaseBalanceDelta + + bundleActualEffGP := bundleProfit.Div(bundleProfit, big.NewInt(int64(gasUsed))) + bundleSimEffGP := new(big.Int).Set(bundle.MevGasPrice) + + // allow >-1% divergence + actualEGP := new(big.Int).Mul(bundleActualEffGP, common.Big100) // bundle actual effective gas price * 100 + simulatedEGP := new(big.Int).Mul(bundleSimEffGP, big.NewInt(99)) // bundle simulated effective gas price * 99 + + if simulatedEGP.Cmp(actualEGP) > 0 { + log.Trace("Bundle underpays after inclusion", "bundle", bundle.OriginalBundle.Hash) + return &lowProfitError{ + ExpectedEffectiveGasPrice: bundleSimEffGP, + ActualEffectiveGasPrice: bundleActualEffGP, + } + } + + if algoConf.EnforceProfit { + // if profit is enforced between simulation and actual commit, only allow ProfitThresholdPercent divergence + simulatedBundleProfit := new(big.Int).Set(bundle.TotalEth) + actualBundleProfit := new(big.Int).Mul(bundleActualEffGP, big.NewInt(int64(gasUsed))) + + // We want to make simulated profit smaller to allow for some leeway in cases where the actual profit is + // lower due to transaction ordering + simulatedProfitMultiple := new(big.Int).Mul(simulatedBundleProfit, algoConf.ProfitThresholdPercent) + actualProfitMultiple := new(big.Int).Mul(actualBundleProfit, common.Big100) + + if simulatedProfitMultiple.Cmp(actualProfitMultiple) > 0 { + log.Trace("Lower bundle profit found after inclusion", "bundle", bundle.OriginalBundle.Hash) + return &lowProfitError{ + ExpectedProfit: simulatedBundleProfit, + ActualProfit: actualBundleProfit, + } + } + } + + *envDiff = *tmpEnvDiff + return nil +} + +func (envDiff *environmentDiff) commitPayoutTx(amount *big.Int, sender, receiver common.Address, gas uint64, prv *ecdsa.PrivateKey, chData chainData) (*types.Receipt, error) { + return commitPayoutTx(PayoutTransactionParams{ + Amount: amount, + BaseFee: envDiff.header.BaseFee, + ChainData: chData, + Gas: gas, + CommitFn: envDiff.commitTx, + Receiver: receiver, + Sender: sender, + SenderBalance: envDiff.state.GetBalance(sender), + SenderNonce: envDiff.state.GetNonce(sender), + Signer: envDiff.baseEnvironment.signer, + PrivateKey: prv, + }) +} + +func (envDiff *environmentDiff) commitSBundle(b *types.SimSBundle, chData chainData, interrupt *int32, key *ecdsa.PrivateKey, algoConf algorithmConfig) error { + if key == nil { + return errNoPrivateKey + } + + tmpEnvDiff := envDiff.copy() + + coinbaseBefore := 
tmpEnvDiff.state.GetBalance(tmpEnvDiff.header.Coinbase) + gasBefore := tmpEnvDiff.gasPool.Gas() + + if err := tmpEnvDiff.commitSBundleInner(b.Bundle, chData, interrupt, key); err != nil { + return err + } + + coinbaseAfter := tmpEnvDiff.state.GetBalance(tmpEnvDiff.header.Coinbase) + gasAfter := tmpEnvDiff.gasPool.Gas() + + coinbaseDelta := new(big.Int).Sub(coinbaseAfter, coinbaseBefore) + gasDelta := new(big.Int).SetUint64(gasBefore - gasAfter) + + if coinbaseDelta.Cmp(common.Big0) < 0 { + return errors.New("coinbase balance decreased") + } + + gotEGP := new(big.Int).Div(coinbaseDelta, gasDelta) + simEGP := new(big.Int).Set(b.MevGasPrice) + + // allow > 1% difference + actualEGP := new(big.Int).Mul(gotEGP, big.NewInt(101)) + simulatedEGP := new(big.Int).Mul(simEGP, common.Big100) + + if simulatedEGP.Cmp(actualEGP) > 0 { + return &lowProfitError{ + ExpectedEffectiveGasPrice: simEGP, + ActualEffectiveGasPrice: gotEGP, + } + } + + if algoConf.EnforceProfit { + // if profit is enforced between simulation and actual commit, only allow >-1% divergence + simulatedSbundleProfit := new(big.Int).Set(b.Profit) + actualSbundleProfit := new(big.Int).Set(coinbaseDelta) + + // We want to make simulated profit smaller to allow for some leeway in cases where the actual profit is + // lower due to transaction ordering + simulatedProfitMultiple := new(big.Int).Mul(simulatedSbundleProfit, algoConf.ProfitThresholdPercent) + actualProfitMultiple := new(big.Int).Mul(actualSbundleProfit, common.Big100) + + if simulatedProfitMultiple.Cmp(actualProfitMultiple) > 0 { + log.Trace("Lower sbundle profit found after inclusion", "sbundle", b.Bundle.Hash()) + return &lowProfitError{ + ExpectedProfit: simulatedSbundleProfit, + ActualProfit: actualSbundleProfit, + } + } + } + + *envDiff = *tmpEnvDiff + return nil +} + +func (envDiff *environmentDiff) commitSBundleInner(b *types.SBundle, chData chainData, interrupt *int32, key *ecdsa.PrivateKey) error { + // check inclusion + minBlock := b.Inclusion.BlockNumber + maxBlock := b.Inclusion.MaxBlockNumber + if current := envDiff.header.Number.Uint64(); current < minBlock || current > maxBlock { + return fmt.Errorf("bundle inclusion block number out of range: %d <= %d <= %d", minBlock, current, maxBlock) + } + + // extract constraints into convenient format + refundIdx := make([]bool, len(b.Body)) + refundPercents := make([]int, len(b.Body)) + for _, el := range b.Validity.Refund { + refundIdx[el.BodyIdx] = true + refundPercents[el.BodyIdx] = el.Percent + } + + var ( + totalProfit *big.Int = new(big.Int) + refundableProfit *big.Int = new(big.Int) + ) + + var ( + coinbaseDelta = new(big.Int) + coinbaseBefore *big.Int + ) + // insert body and check it + for i, el := range b.Body { + coinbaseDelta.Set(common.Big0) + coinbaseBefore = envDiff.state.GetBalance(envDiff.header.Coinbase) + + if el.Tx != nil { + receipt, _, err := envDiff.commitTx(el.Tx, chData) + if err != nil { + return err + } + if receipt.Status != types.ReceiptStatusSuccessful && !el.CanRevert { + return errors.New("tx failed") + } + } else if el.Bundle != nil { + err := envDiff.commitSBundleInner(el.Bundle, chData, interrupt, key) + if err != nil { + return err + } + } else { + return errors.New("invalid body element") + } + + coinbaseDelta.Set(envDiff.state.GetBalance(envDiff.header.Coinbase)) + coinbaseDelta.Sub(coinbaseDelta, coinbaseBefore) + + totalProfit.Add(totalProfit, coinbaseDelta) + if !refundIdx[i] { + refundableProfit.Add(refundableProfit, coinbaseDelta) + } + } + + // enforce constraints + 
coinbaseDelta.Set(common.Big0) + coinbaseBefore = envDiff.state.GetBalance(envDiff.header.Coinbase) + for i, el := range refundPercents { + if !refundIdx[i] { + continue + } + refundConfig, err := types.GetRefundConfig(&b.Body[i], envDiff.baseEnvironment.signer) + if err != nil { + return err + } + + maxPayoutCost := new(big.Int).Set(core.SbundlePayoutMaxCost) + maxPayoutCost.Mul(maxPayoutCost, big.NewInt(int64(len(refundConfig)))) + maxPayoutCost.Mul(maxPayoutCost, envDiff.header.BaseFee) + + allocatedValue := common.PercentOf(refundableProfit, el) + allocatedValue.Sub(allocatedValue, maxPayoutCost) + + if allocatedValue.Cmp(common.Big0) < 0 { + return fmt.Errorf("negative payout") + } + + for _, refund := range refundConfig { + refundValue := common.PercentOf(allocatedValue, refund.Percent) + refundReceiver := refund.Address + rec, err := envDiff.commitPayoutTx(refundValue, envDiff.header.Coinbase, refundReceiver, core.SbundlePayoutMaxCostInt, key, chData) + if err != nil { + return err + } + if rec.Status != types.ReceiptStatusSuccessful { + return fmt.Errorf("refund tx failed") + } + log.Trace("Committed kickback", "payout", ethIntToFloat(allocatedValue), "receiver", refundReceiver) + } + } + coinbaseDelta.Set(envDiff.state.GetBalance(envDiff.header.Coinbase)) + coinbaseDelta.Sub(coinbaseDelta, coinbaseBefore) + totalProfit.Add(totalProfit, coinbaseDelta) + + if totalProfit.Cmp(common.Big0) < 0 { + return fmt.Errorf("negative profit") + } + return nil +} diff --git a/miner/miner.go b/miner/miner.go index b4f2f8c709..3e7b2d47a8 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -84,21 +84,22 @@ func AlgoTypeFlagToEnum(algoString string) (AlgoType, error) { // Config is the configuration parameters of mining. type Config struct { - Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account) - Notify []string `toml:",omitempty"` // HTTP URL list to be notified of new work packages (only useful in ethash). - NotifyFull bool `toml:",omitempty"` // Notify with pending block headers instead of work packages - ExtraData hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner - GasFloor uint64 // Target gas floor for mined blocks. - GasCeil uint64 // Target gas ceiling for mined blocks. - GasPrice *big.Int // Minimum gas price for mining a transaction - AlgoType AlgoType // Algorithm to use for block building - Recommit time.Duration // The time interval for miner to re-create mining work. - Noverify bool // Disable remote mining solution verification(only useful in ethash). - BuilderTxSigningKey *ecdsa.PrivateKey `toml:",omitempty"` // Signing key of builder coinbase to make transaction to validator - MaxMergedBundles int - Blocklist []common.Address `toml:",omitempty"` - NewPayloadTimeout time.Duration // The maximum time allowance for creating a new payload - PriceCutoffPercent int // Effective gas price cutoff % used for bucketing transactions by price (only useful in greedy-buckets AlgoType) + Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account) + Notify []string `toml:",omitempty"` // HTTP URL list to be notified of new work packages (only useful in ethash). + NotifyFull bool `toml:",omitempty"` // Notify with pending block headers instead of work packages + ExtraData hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner + GasFloor uint64 // Target gas floor for mined blocks. + GasCeil uint64 // Target gas ceiling for mined blocks. 
+ GasPrice *big.Int // Minimum gas price for mining a transaction + AlgoType AlgoType // Algorithm to use for block building + Recommit time.Duration // The time interval for miner to re-create mining work. + Noverify bool // Disable remote mining solution verification(only useful in ethash). + BuilderTxSigningKey *ecdsa.PrivateKey `toml:",omitempty"` // Signing key of builder coinbase to make transaction to validator + MaxMergedBundles int + Blocklist []common.Address `toml:",omitempty"` + NewPayloadTimeout time.Duration // The maximum time allowance for creating a new payload + PriceCutoffPercent int // Effective gas price cutoff % used for bucketing transactions by price (only useful in greedy-buckets AlgoType) + EnableMultiTransactionSnapshot bool // Enable block building with multi-transaction snapshots to reduce state copying (note: experimental) } // DefaultConfig contains default settings for miner. @@ -110,9 +111,10 @@ var DefaultConfig = Config{ // consensus-layer usually will wait a half slot of time(6s) // for payload generation. It should be enough for Geth to // run 3 rounds. - Recommit: 2 * time.Second, - NewPayloadTimeout: 2 * time.Second, - PriceCutoffPercent: defaultPriceCutoffPercent, + Recommit: 2 * time.Second, + NewPayloadTimeout: 2 * time.Second, + PriceCutoffPercent: defaultPriceCutoffPercent, + EnableMultiTransactionSnapshot: defaultAlgorithmConfig.EnableMultiTxSnap, } // Miner creates blocks and searches for proof-of-work values. diff --git a/miner/order_apply_changes.go b/miner/order_apply_changes.go deleted file mode 100644 index b7ae71afdd..0000000000 --- a/miner/order_apply_changes.go +++ /dev/null @@ -1,198 +0,0 @@ -package miner - -import ( - "errors" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth/tracers/logger" - "github.com/ethereum/go-ethereum/log" - "math/big" -) - -// orderApplyChanges is a helper struct to apply and revert changes to the environment -type orderApplyChanges struct { - env *environment - gasPool *core.GasPool - usedGas uint64 - profit *big.Int - txs []*types.Transaction - receipts []*types.Receipt -} - -func newOrderApplyChanges(env *environment) (*orderApplyChanges, error) { - if err := env.state.MultiTxSnapshot(); err != nil { - return nil, err - } - - return &orderApplyChanges{ - env: env, - gasPool: new(core.GasPool).AddGas(env.gasPool.Gas()), - usedGas: env.header.GasUsed, - profit: new(big.Int).Set(env.profit), - txs: make([]*types.Transaction, 0), - receipts: make([]*types.Receipt, 0), - }, nil -} - -func (c *orderApplyChanges) commitTx(tx *types.Transaction, chData chainData) (*types.Receipt, int, error) { - signer := c.env.signer - sender, err := types.Sender(signer, tx) - if err != nil { - return nil, popTx, err - } - - gasPrice, err := tx.EffectiveGasTip(c.env.header.BaseFee) - if err != nil { - return nil, shiftTx, err - } - - if _, in := chData.blacklist[sender]; in { - return nil, popTx, errors.New("blacklist violation, tx.sender") - } - - if to := tx.To(); to != nil { - if _, in := chData.blacklist[*to]; in { - return nil, popTx, errors.New("blacklist violation, tx.to") - } - } - - cfg := *chData.chain.GetVMConfig() - touchTracer := logger.NewAccountTouchTracer() - cfg.Tracer = touchTracer - cfg.Debug = true - - c.env.state.SetTxContext(tx.Hash(), c.env.tcount+len(c.txs)) - receipt, err := core.ApplyTransaction(chData.chainConfig, chData.chain, &c.env.coinbase, c.gasPool, c.env.state, c.env.header, 
tx, &c.usedGas, cfg, nil) - if err != nil { - switch { - case errors.Is(err, core.ErrGasLimitReached): - // Pop the current out-of-gas transaction without shifting in the next from the account - from, _ := types.Sender(signer, tx) - log.Trace("Gas limit exceeded for current block", "sender", from) - return receipt, popTx, err - - case errors.Is(err, core.ErrNonceTooLow): - // New head notification data race between the transaction pool and miner, shift - from, _ := types.Sender(signer, tx) - log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) - return receipt, shiftTx, err - - case errors.Is(err, core.ErrNonceTooHigh): - // Reorg notification data race between the transaction pool and miner, skip account = - from, _ := types.Sender(signer, tx) - log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) - return receipt, popTx, err - - case errors.Is(err, core.ErrTxTypeNotSupported): - // Pop the unsupported transaction without shifting in the next from the account - from, _ := types.Sender(signer, tx) - log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) - return receipt, popTx, err - - default: - // Strange error, discard the transaction and get the next in line (note, the - // nonce-too-high clause will prevent us from executing in vain). - log.Trace("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) - return receipt, shiftTx, err - } - } - - for _, address := range touchTracer.TouchedAddresses() { - if _, in := chData.blacklist[address]; in { - return nil, popTx, errors.New("blacklist violation, tx trace") - } - } - - c.profit.Add(c.profit, new(big.Int).Mul(new(big.Int).SetUint64(receipt.GasUsed), gasPrice)) - c.txs = append(c.txs, tx) - c.receipts = append(c.receipts, receipt) - - return receipt, shiftTx, nil -} - -func (c *orderApplyChanges) commitBundle(bundle *types.SimulatedBundle, chData chainData) error { - var ( - profitBefore = new(big.Int).Set(c.profit) - coinbaseBefore = new(big.Int).Set(c.env.state.GetBalance(c.env.coinbase)) - gasUsedBefore = c.usedGas - hasBaseFee = c.env.header.BaseFee != nil - - bundleErr error - ) - - for _, tx := range bundle.OriginalBundle.Txs { - if hasBaseFee && tx.Type() == types.DynamicFeeTxType { - // Sanity check for extremely large numbers - if tx.GasFeeCap().BitLen() > 256 { - bundleErr = core.ErrFeeCapVeryHigh - break - } - if tx.GasTipCap().BitLen() > 256 { - bundleErr = core.ErrTipVeryHigh - break - } - - // Ensure gasFeeCap is greater than or equal to gasTipCap. 
- if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { - bundleErr = core.ErrTipAboveFeeCap - break - } - } - receipt, _, err := c.commitTx(tx, chData) - - if err != nil { - log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) - bundleErr = err - break - } - - if receipt.Status != types.ReceiptStatusSuccessful && !bundle.OriginalBundle.RevertingHash(tx.Hash()) { - log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) - bundleErr = errors.New("bundle tx revert") - break - } - } - - if bundleErr != nil { - return bundleErr - } - - var ( - bundleProfit = new(big.Int).Sub(c.env.state.GetBalance(c.env.coinbase), coinbaseBefore) - gasUsed = c.usedGas - gasUsedBefore - - effGP = new(big.Int).Div(bundleProfit, new(big.Int).SetUint64(gasUsed)) - simEffGP = new(big.Int).Set(bundle.MevGasPrice) - ) - - // allow >-1% divergence - effGP.Mul(effGP, common.Big100) - simEffGP.Mul(simEffGP, big.NewInt(99)) - if simEffGP.Cmp(effGP) > 0 { - log.Trace("Bundle underpays after inclusion", "bundle", bundle.OriginalBundle.Hash) - return errors.New("bundle underpays") - } - - c.profit.Add(profitBefore, bundleProfit) - return nil -} - -// revert reverts all changes to the environment - every commit operation must be followed by a revert or apply operation -func (c *orderApplyChanges) revert() error { - return c.env.state.MultiTxSnapshotRevert() -} - -func (c *orderApplyChanges) apply() error { - if err := c.env.state.MultiTxSnapshotDiscard(); err != nil { - return err - } - - c.env.gasPool.SetGas(c.gasPool.Gas()) - c.env.header.GasUsed = c.usedGas - c.env.profit.Set(c.profit) - c.env.tcount += len(c.txs) - c.env.txs = append(c.env.txs, c.txs...) - c.env.receipts = append(c.env.receipts, c.receipts...) 
- return nil -} diff --git a/miner/worker.go b/miner/worker.go index 26770eedc4..57f8f78886 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1415,7 +1415,7 @@ func (w *worker) fillTransactionsAlgoWorker(interrupt *int32, env *environment) ExpectedProfit: nil, ProfitThresholdPercent: defaultProfitThreshold, PriceCutoffPercent: priceCutoffPercent, - EnableMultiTxSnap: true, + EnableMultiTxSnap: w.config.EnableMultiTransactionSnapshot, } builder, err := newGreedyBucketsBuilder( w.chain, w.chainConfig, algoConf, w.blockList, env, @@ -1429,10 +1429,16 @@ func (w *worker) fillTransactionsAlgoWorker(interrupt *int32, env *environment) case ALGO_GREEDY: fallthrough default: - builder := newGreedyBuilder( - w.chain, w.chainConfig, w.blockList, env, - w.config.BuilderTxSigningKey, interrupt, + algoConf := &defaultAlgorithmConfig + algoConf.EnableMultiTxSnap = w.config.EnableMultiTransactionSnapshot + + builder, err := newGreedyBuilder( + w.chain, w.chainConfig, algoConf, w.blockList, + env, w.config.BuilderTxSigningKey, interrupt, ) + if err != nil { + return nil, nil, nil, err + } newEnv, blockBundles, usedSbundle = builder.buildBlock(bundlesToConsider, sbundlesToConsider, pending) } From 4908ea8f7db40857dc63f074adbadbce664014a2 Mon Sep 17 00:00:00 2001 From: Vazha Date: Wed, 19 Jul 2023 20:23:31 +0300 Subject: [PATCH 11/46] Init BuilderRateLimitResubmitInterval builder config param from command line flag (#84) Co-authored-by: Vazha --- cmd/utils/flags.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 68185dae25..0a9aa42964 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1688,6 +1688,7 @@ func SetBuilderConfig(ctx *cli.Context, cfg *builder.Config) { cfg.BuilderRateLimitMaxBurst = ctx.Int(BuilderRateLimitMaxBurst.Name) cfg.BuilderSubmissionOffset = ctx.Duration(BuilderSubmissionOffset.Name) cfg.EnableCancellations = ctx.IsSet(BuilderEnableCancellations.Name) + cfg.BuilderRateLimitResubmitInterval = ctx.String(BuilderBlockResubmitInterval.Name) } // SetNodeConfig applies node-related command line flags to the config. From dc5cc4927a8dd4b15c63df78793daf24e98d889a Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Mon, 24 Jul 2023 14:26:18 -0500 Subject: [PATCH 12/46] Update readme to include greedy-buckets for miner.algotype (#87) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 17a179964f..c7d67376d3 100644 --- a/README.md +++ b/README.md @@ -121,7 +121,7 @@ $ geth --help MINER --miner.algotype value (default: "mev-geth") - Block building algorithm to use [=mev-geth] (mev-geth, greedy) + Block building algorithm to use [=mev-geth] (mev-geth, greedy, greedy-buckets) --miner.blocklist value flashbots - Path to JSON file with list of blocked addresses. 
Miner will ignore From 5ddc19baaa55a64e25f337183e47b0e564a33832 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Tue, 25 Jul 2023 20:53:54 -0500 Subject: [PATCH 13/46] Update godoc, remove unused getter for access list, add CLI flag to builder flag list, update log level for multi-tx-snap error --- cmd/geth/main.go | 1 + core/state/statedb.go | 4 ---- miner/algo_common.go | 4 ++-- miner/algo_greedy_buckets.go | 2 +- miner/worker.go | 3 ++- 5 files changed, 6 insertions(+), 8 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index cd4a004680..48c422e1f9 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -159,6 +159,7 @@ var ( builderApiFlags = []cli.Flag{ utils.BuilderEnabled, + utils.BuilderEnableMultiTxSnapshot, utils.BuilderEnableValidatorChecks, utils.BuilderBlockValidationBlacklistSourceFilePath, utils.BuilderEnableLocalRelay, diff --git a/core/state/statedb.go b/core/state/statedb.go index da2f3a3cbf..33684f95c2 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1201,10 +1201,6 @@ func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common. return ret } -func (s *StateDB) AccessList() *accessList { - return s.accessList -} - // MultiTxSnapshot creates new checkpoint for multi txs reverts func (s *StateDB) MultiTxSnapshot() error { if s.multiTxSnapshot != nil { diff --git a/miner/algo_common.go b/miner/algo_common.go index b4d78d7977..15b6c99b84 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -80,8 +80,8 @@ type ( // is 10 (i.e. 10%), then the minimum effective gas price included in the same bucket as the top transaction // is (1000 * 10%) = 100 wei. PriceCutoffPercent int - // EnableMultiTxSnap is true if we want to use multi-transaction snapshot - // for committing transactions (note: experimental) + // EnableMultiTxSnap is true if we want to use multi-transaction snapshot for committing transactions, + // which reduce state copies when reverting failed bundles (note: experimental) EnableMultiTxSnap bool } diff --git a/miner/algo_greedy_buckets.go b/miner/algo_greedy_buckets.go index c03492a5d1..861ad15558 100644 --- a/miner/algo_greedy_buckets.go +++ b/miner/algo_greedy_buckets.go @@ -62,7 +62,7 @@ func newGreedyBucketsBuilder( orders, ) if err != nil { - log.Debug("Error(s) building multi-tx snapshot block", "err", err) + log.Trace("Error(s) building multi-tx snapshot block", "err", err) } return builder.inputEnvironment, usedBundles, usedSbundles } diff --git a/miner/worker.go b/miner/worker.go index 57f8f78886..65986bacec 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1509,7 +1509,8 @@ func (w *worker) generateWork(params *generateParams) (*types.Block, *big.Int, e totalSbundles++ } - log.Info("Block finalized and assembled", "height", block.Number().String(), "blockProfit", ethIntToFloat(profit), + log.Info("Block finalized and assembled", + "height", block.Number().String(), "blockProfit", ethIntToFloat(profit), "txs", len(env.txs), "bundles", len(blockBundles), "okSbundles", okSbundles, "totalSbundles", totalSbundles, "gasUsed", block.GasUsed(), "time", time.Since(start)) if metrics.EnabledBuilder { From 6ccec93bf339390c465b816f33f3a51415bf0a26 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Tue, 25 Jul 2023 22:03:14 -0500 Subject: [PATCH 14/46] Fix linter errors --- core/state/multi_tx_snapshot.go | 3 +- core/state/multi_tx_snapshot_test.go | 54 +++++++++++++++------------- miner/algo_common.go | 3 +- miner/algo_greedy.go | 1 - miner/algo_greedy_buckets.go | 4 --- miner/env_changes.go | 5 ++- 
miner/env_changes_test.go | 5 +-- miner/environment_diff.go | 3 +- 8 files changed, 40 insertions(+), 38 deletions(-) diff --git a/core/state/multi_tx_snapshot.go b/core/state/multi_tx_snapshot.go index eabfc06993..ff927927be 100644 --- a/core/state/multi_tx_snapshot.go +++ b/core/state/multi_tx_snapshot.go @@ -1,8 +1,9 @@ package state import ( - "github.com/ethereum/go-ethereum/common" "math/big" + + "github.com/ethereum/go-ethereum/common" ) // MultiTxSnapshot retains StateDB changes for multiple transactions. diff --git a/core/state/multi_tx_snapshot_test.go b/core/state/multi_tx_snapshot_test.go index 2f32294e25..088b93c36d 100644 --- a/core/state/multi_tx_snapshot_test.go +++ b/core/state/multi_tx_snapshot_test.go @@ -3,18 +3,23 @@ package state import ( "bytes" "fmt" - "github.com/ethereum/go-ethereum/common" "math/big" "math/rand" "testing" + + "github.com/ethereum/go-ethereum/common" ) var ( addrs []common.Address keys []common.Hash + + rng *rand.Rand ) func init() { + rng = rand.New(rand.NewSource(0)) + for i := 0; i < 20; i++ { addrs = append(addrs, common.HexToAddress(fmt.Sprintf("0x%02x", i))) } @@ -69,7 +74,7 @@ func verifyObservableAccountState(s *StateDB, state *observableAccountState) err if s.GetNonce(state.address) != state.nonce { return fmt.Errorf("nonce mismatch %v != %v", s.GetNonce(state.address), state.nonce) } - if bytes.Compare(s.GetCode(state.address), state.code) != 0 { + if !bytes.Equal(s.GetCode(state.address), state.code) { return fmt.Errorf("code mismatch %v != %v", s.GetCode(state.address), state.code) } if s.GetCodeHash(state.address) != state.codeHash { @@ -102,7 +107,10 @@ func verifyObservableAccountState(s *StateDB, state *observableAccountState) err func randomBytes(n int) []byte { b := make([]byte, n) - rand.Read(b) + _, err := rng.Read(b) + if err != nil { + panic(err) + } return b } @@ -122,9 +130,9 @@ func randFillAccountState(addr common.Address, s *StateDB) { } func randFillAccount(addr common.Address, s *StateDB) { - s.SetNonce(addr, rand.Uint64()) - s.SetBalance(addr, big.NewInt(rand.Int63())) - s.SetCode(addr, randomBytes(rand.Intn(100))) + s.SetNonce(addr, rng.Uint64()) + s.SetBalance(addr, big.NewInt(rng.Int63())) + s.SetCode(addr, randomBytes(rng.Intn(100))) randFillAccountState(addr, s) } @@ -139,49 +147,47 @@ func prepareInitialState(s *StateDB) { afterCommitHooks = append(afterCommitHooks, afterCommit) } - rand.Seed(0) - addAccount(func(addr common.Address, s *StateDB) { - s.SetNonce(addr, rand.Uint64()) + s.SetNonce(addr, rng.Uint64()) }, nil) addAccount(nil, func(addr common.Address, s *StateDB) { - s.SetNonce(addr, rand.Uint64()) + s.SetNonce(addr, rng.Uint64()) }) addAccount(func(addr common.Address, s *StateDB) { - s.SetNonce(addr, rand.Uint64()) + s.SetNonce(addr, rng.Uint64()) }, func(addr common.Address, s *StateDB) { - s.SetNonce(addr, rand.Uint64()) + s.SetNonce(addr, rng.Uint64()) }) addAccount(func(addr common.Address, s *StateDB) { - s.SetBalance(addr, big.NewInt(rand.Int63())) + s.SetBalance(addr, big.NewInt(rng.Int63())) }, nil) addAccount(nil, func(addr common.Address, s *StateDB) { - s.SetBalance(addr, big.NewInt(rand.Int63())) + s.SetBalance(addr, big.NewInt(rng.Int63())) }) addAccount(func(addr common.Address, s *StateDB) { - s.SetBalance(addr, big.NewInt(rand.Int63())) + s.SetBalance(addr, big.NewInt(rng.Int63())) }, func(addr common.Address, s *StateDB) { - s.SetBalance(addr, big.NewInt(rand.Int63())) + s.SetBalance(addr, big.NewInt(rng.Int63())) }) addAccount(func(addr common.Address, s *StateDB) { - 
s.SetCode(addr, randomBytes(rand.Intn(100))) + s.SetCode(addr, randomBytes(rng.Intn(100))) }, nil) addAccount(nil, func(addr common.Address, s *StateDB) { - s.SetCode(addr, randomBytes(rand.Intn(100))) + s.SetCode(addr, randomBytes(rng.Intn(100))) }) addAccount(func(addr common.Address, s *StateDB) { - s.SetCode(addr, randomBytes(rand.Intn(100))) + s.SetCode(addr, randomBytes(rng.Intn(100))) s.SetCode(addr, nil) }, func(addr common.Address, s *StateDB) { - s.SetCode(addr, randomBytes(rand.Intn(100))) + s.SetCode(addr, randomBytes(rng.Intn(100))) }) addAccount(func(addr common.Address, s *StateDB) { - s.SetCode(addr, randomBytes(rand.Intn(100))) + s.SetCode(addr, randomBytes(rng.Intn(100))) s.Suicide(addr) }, func(addr common.Address, s *StateDB) { - s.SetCode(addr, randomBytes(rand.Intn(100))) + s.SetCode(addr, randomBytes(rng.Intn(100))) }) addAccount(func(addr common.Address, s *StateDB) { @@ -277,7 +283,7 @@ func testMutliTxSnapshot(t *testing.T, actions func(s *StateDB)) { if len(s.state.stateObjectsPending) != len(pendingAddressesBefore) { t.Error("pending state objects count mismatch", "got", len(s.state.stateObjectsPending), "expected", len(pendingAddressesBefore)) } - for k, _ := range s.state.stateObjectsPending { + for k := range s.state.stateObjectsPending { if _, ok := pendingAddressesBefore[k]; !ok { t.Error("stateObjectsPending mismatch, before was nil", "address", k) } @@ -285,7 +291,7 @@ func testMutliTxSnapshot(t *testing.T, actions func(s *StateDB)) { if len(s.state.stateObjectsDirty) != len(dirtyAddressesBefore) { t.Error("dirty state objects count mismatch", "got", len(s.state.stateObjectsDirty), "expected", len(dirtyAddressesBefore)) } - for k, _ := range s.state.stateObjectsDirty { + for k := range s.state.stateObjectsDirty { if _, ok := dirtyAddressesBefore[k]; !ok { t.Error("stateObjectsDirty mismatch, before was nil", "address", k) } diff --git a/miner/algo_common.go b/miner/algo_common.go index 15b6c99b84..1adcf9ccd4 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -299,11 +299,10 @@ func BuildMultiTxSnapBlock( chData chainData, algoConf algorithmConfig, orders *types.TransactionsByPriceAndNonce) ([]types.SimulatedBundle, []types.UsedSBundle, error) { - var ( usedBundles []types.SimulatedBundle usedSbundles []types.UsedSBundle - orderFailed = false + orderFailed bool buildBlockErrors []error ) diff --git a/miner/algo_greedy.go b/miner/algo_greedy.go index a2954bcc37..9c3c76b2ce 100644 --- a/miner/algo_greedy.go +++ b/miner/algo_greedy.go @@ -28,7 +28,6 @@ func newGreedyBuilder( chain *core.BlockChain, chainConfig *params.ChainConfig, algoConf *algorithmConfig, blacklist map[common.Address]struct{}, env *environment, key *ecdsa.PrivateKey, interrupt *int32, ) (*greedyBuilder, error) { - if algoConf == nil { return nil, errNoAlgorithmConfig } diff --git a/miner/algo_greedy_buckets.go b/miner/algo_greedy_buckets.go index 861ad15558..7e907dac87 100644 --- a/miner/algo_greedy_buckets.go +++ b/miner/algo_greedy_buckets.go @@ -50,7 +50,6 @@ func newGreedyBucketsBuilder( if algoConf.EnableMultiTxSnap { buildBlockFunc = func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - orders := types.NewTransactionsByPriceAndNonce(builder.inputEnvironment.signer, transactions, simBundles, simSBundles, builder.inputEnvironment.header.BaseFee) @@ -69,7 +68,6 @@ func newGreedyBucketsBuilder( } else { buildBlockFunc = func(simBundles 
[]types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - orders := types.NewTransactionsByPriceAndNonce(builder.inputEnvironment.signer, transactions, simBundles, simSBundles, builder.inputEnvironment.header.BaseFee) @@ -88,7 +86,6 @@ func newGreedyBucketsBuilder( func CheckRetryOrderAndReinsert( order *types.TxWithMinerFee, orders *types.TransactionsByPriceAndNonce, retryMap map[*types.TxWithMinerFee]int, retryLimit int) bool { - var isRetryable bool = false if retryCount, exists := retryMap[order]; exists { if retryCount != retryLimit { @@ -281,6 +278,5 @@ func (b *greedyBucketsBuilder) mergeOrdersIntoEnvDiff( func (b *greedyBucketsBuilder) buildBlock(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - return b.buildBlockFunc(simBundles, simSBundles, transactions) } diff --git a/miner/env_changes.go b/miner/env_changes.go index 85baaade32..47a3ae98f1 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -4,12 +4,13 @@ import ( "crypto/ecdsa" "errors" "fmt" + "math/big" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" - "math/big" ) // envChanges is a helper struct to apply and revert changes to the environment @@ -283,7 +284,6 @@ func (c *envChanges) CommitSBundle(sbundle *types.SimSBundle, chData chainData, } func (c *envChanges) commitSBundle(sbundle *types.SBundle, chData chainData, key *ecdsa.PrivateKey, algoConf algorithmConfig) error { - var ( // check inclusion minBlock = sbundle.Inclusion.BlockNumber @@ -396,7 +396,6 @@ func (c *envChanges) revert() error { func (c *envChanges) rollback( gasUsedBefore uint64, gasPoolBefore *core.GasPool, profitBefore *big.Int, txsBefore []*types.Transaction, receiptsBefore []*types.Receipt) { - c.usedGas = gasUsedBefore c.gasPool = gasPoolBefore c.txs = txsBefore diff --git a/miner/env_changes_test.go b/miner/env_changes_test.go index 82ba8bd913..f1225308bf 100644 --- a/miner/env_changes_test.go +++ b/miner/env_changes_test.go @@ -1,11 +1,12 @@ package miner import ( + "math/big" + "testing" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/require" - "math/big" - "testing" ) func TestTxCommitSnaps(t *testing.T) { diff --git a/miner/environment_diff.go b/miner/environment_diff.go index 05a6f07140..66e3dca4f1 100644 --- a/miner/environment_diff.go +++ b/miner/environment_diff.go @@ -4,12 +4,13 @@ import ( "crypto/ecdsa" "errors" "fmt" + "math/big" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" - "math/big" ) // environmentDiff is a helper struct used to apply transactions to a block using a copy of the state at that block From 91b5283818b276062f7f5c784fbc63fde3ebc239 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Thu, 27 Jul 2023 19:27:14 -0500 Subject: [PATCH 15/46] Add retry logic for multi-tx-snapshot block build algorithm --- miner/algo_common.go | 71 +++++++++++++++++++++++++++++++++--- miner/algo_greedy_buckets.go | 22 ----------- 2 files changed, 66 insertions(+), 27 deletions(-) diff --git 
a/miner/algo_common.go b/miner/algo_common.go index 1adcf9ccd4..795b45efd2 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -299,9 +299,13 @@ func BuildMultiTxSnapBlock( chData chainData, algoConf algorithmConfig, orders *types.TransactionsByPriceAndNonce) ([]types.SimulatedBundle, []types.UsedSBundle, error) { + + const retryLimit = 1 + var ( usedBundles []types.SimulatedBundle usedSbundles []types.UsedSBundle + retryMap = make(map[*types.TxWithMinerFee]int) orderFailed bool buildBlockErrors []error ) @@ -320,7 +324,6 @@ func BuildMultiTxSnapBlock( return nil, nil, err } - // TODO: add support for retry logic if tx := order.Tx(); tx != nil { _, skip, err := changes.commitTx(tx, chData) switch skip { @@ -338,6 +341,20 @@ func BuildMultiTxSnapBlock( err = changes.commitBundle(bundle, chData) orders.Pop() if err != nil { + log.Trace("Could not apply bundle", "bundle", bundle.OriginalBundle.Hash, "err", err) + + var e *lowProfitError + if errors.As(err, &e) { + if e.ActualEffectiveGasPrice != nil { + order.SetPrice(e.ActualEffectiveGasPrice) + } + + if e.ActualProfit != nil { + order.SetProfit(e.ActualProfit) + } + + CheckRetryOrderAndReinsert(order, orders, retryMap, retryLimit) + } buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to commit bundle: %w", err)) orderFailed = true } else { @@ -348,14 +365,36 @@ func BuildMultiTxSnapBlock( Bundle: sbundle.Bundle, } err = changes.CommitSBundle(sbundle, chData, key, algoConf) + var ( + success = err == nil + canAppend = true // only append if we are not retrying the bundle + ) if err != nil { + log.Trace("Could not apply sbundle", "bundle", sbundle.Bundle.Hash(), "err", err) + + var e *lowProfitError + if errors.As(err, &e) { + if e.ActualEffectiveGasPrice != nil { + order.SetPrice(e.ActualEffectiveGasPrice) + } + + if e.ActualProfit != nil { + order.SetProfit(e.ActualProfit) + } + + // if the sbundle was not included due to low profit, we can retry the bundle + if ok := CheckRetryOrderAndReinsert(order, orders, retryMap, retryLimit); ok { + // don't append the sbundle to usedSbundles if we are retrying the bundle + canAppend = false + } + } buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to commit sbundle: %w", err)) orderFailed = true - usedEntry.Success = false - } else { - usedEntry.Success = true } - usedSbundles = append(usedSbundles, usedEntry) + if canAppend { + usedEntry.Success = success + usedSbundles = append(usedSbundles, usedEntry) + } } else { // note: this should never happen because we should not be inserting invalid transaction types into // the orders heap @@ -377,3 +416,25 @@ func BuildMultiTxSnapBlock( return usedBundles, usedSbundles, errors.Join(buildBlockErrors...) } + +// CheckRetryOrderAndReinsert checks if the order has been retried up to the retryLimit and if not, reinserts the order into the orders heap. 
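+// The retry count starts at zero on the first failed attempt, and the order is pushed back onto the heap
+// until its count reaches retryLimit.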
+func CheckRetryOrderAndReinsert( + order *types.TxWithMinerFee, orders *types.TransactionsByPriceAndNonce, + retryMap map[*types.TxWithMinerFee]int, retryLimit int) bool { + var isRetryable bool = false + if retryCount, exists := retryMap[order]; exists { + if retryCount != retryLimit { + isRetryable = true + retryMap[order] = retryCount + 1 + } + } else { + retryMap[order] = 0 + isRetryable = true + } + + if isRetryable { + orders.Push(order) + } + + return isRetryable +} diff --git a/miner/algo_greedy_buckets.go b/miner/algo_greedy_buckets.go index 7e907dac87..fc9d290237 100644 --- a/miner/algo_greedy_buckets.go +++ b/miner/algo_greedy_buckets.go @@ -82,28 +82,6 @@ func newGreedyBucketsBuilder( return builder, nil } -// CheckRetryOrderAndReinsert checks if the order has been retried up to the retryLimit and if not, reinserts the order into the orders heap. -func CheckRetryOrderAndReinsert( - order *types.TxWithMinerFee, orders *types.TransactionsByPriceAndNonce, - retryMap map[*types.TxWithMinerFee]int, retryLimit int) bool { - var isRetryable bool = false - if retryCount, exists := retryMap[order]; exists { - if retryCount != retryLimit { - isRetryable = true - retryMap[order] = retryCount + 1 - } - } else { - retryMap[order] = 0 - isRetryable = true - } - - if isRetryable { - orders.Push(order) - } - - return isRetryable -} - // CutoffPriceFromOrder returns the cutoff price for a given order based on the cutoff percent. // For example, if the cutoff percent is 90, the cutoff price will be 90% of the order price, rounded down to the nearest integer. func CutoffPriceFromOrder(order *types.TxWithMinerFee, cutoffPercent int) *big.Int { From da04d6f9816353db58db9debb21a7e6ac28ecef0 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Thu, 27 Jul 2023 19:50:09 -0500 Subject: [PATCH 16/46] Update unit tests to test EnableMultiTxSnap --- miner/algo_test.go | 57 ++++++++++++++++++++++++++++------------------ 1 file changed, 35 insertions(+), 22 deletions(-) diff --git a/miner/algo_test.go b/miner/algo_test.go index d432e15b4a..9f9605e82a 100644 --- a/miner/algo_test.go +++ b/miner/algo_test.go @@ -141,25 +141,38 @@ func TestAlgo(t *testing.T) { for _, test := range algoTests { for _, algo := range test.SupportedAlgorithms { - testName := fmt.Sprintf("%s-%s", test.Name, algo.String()) - t.Run(testName, func(t *testing.T) { - alloc, txPool, bundles, err := test.build(signer, 1) - if err != nil { - t.Fatalf("Build: %v", err) - } - simBundles, err := simulateBundles(config, test.Header, alloc, bundles) - if err != nil { - t.Fatalf("Simulate Bundles: %v", err) - } + // test with multi-tx-snapshot enabled and disabled (default) + multiSnapDisabled := defaultAlgorithmConfig + multiSnapDisabled.EnableMultiTxSnap = false - gotProfit, err := runAlgoTest(algo, config, alloc, txPool, simBundles, test.Header, 1) - if err != nil { - t.Fatal(err) - } - if test.WantProfit.Cmp(gotProfit) != 0 { - t.Fatalf("Profit: want %v, got %v", test.WantProfit, gotProfit) - } - }) + multiSnapEnabled := defaultAlgorithmConfig + multiSnapEnabled.EnableMultiTxSnap = true + + algoConfigs := []algorithmConfig{ + multiSnapEnabled, + multiSnapDisabled, + } + for _, algoConf := range algoConfigs { + testName := fmt.Sprintf("%s-%s-%t", test.Name, algo.String(), algoConf.EnableMultiTxSnap) + + t.Run(testName, func(t *testing.T) { + alloc, txPool, bundles, err := test.build(signer, 1) + if err != nil { + t.Fatalf("Build: %v", err) + } + simBundles, err := simulateBundles(config, test.Header, alloc, bundles) + if err != nil { + 
t.Fatalf("Simulate Bundles: %v", err) + } + gotProfit, err := runAlgoTest(algo, algoConf, config, alloc, txPool, simBundles, test.Header, 1) + if err != nil { + t.Fatal(err) + } + if test.WantProfit.Cmp(gotProfit) != 0 { + t.Fatalf("Profit: want %v, got %v", test.WantProfit, gotProfit) + } + }) + } } } } @@ -203,7 +216,7 @@ func BenchmarkAlgo(b *testing.B) { } }() - gotProfit, err := runAlgoTest(algo, config, alloc, txPoolCopy, simBundles, test.Header, scale) + gotProfit, err := runAlgoTest(algo, defaultAlgorithmConfig, config, alloc, txPoolCopy, simBundles, test.Header, scale) if err != nil { b.Fatal(err) } @@ -218,7 +231,7 @@ func BenchmarkAlgo(b *testing.B) { } // runAlgo executes a single algoTest case and returns the profit. -func runAlgoTest(algo AlgoType, config *params.ChainConfig, alloc core.GenesisAlloc, +func runAlgoTest(algo AlgoType, algoConf algorithmConfig, config *params.ChainConfig, alloc core.GenesisAlloc, txPool map[common.Address]types.Transactions, bundles []types.SimulatedBundle, header *types.Header, scale int) (gotProfit *big.Int, err error) { var ( statedb, chData = genTestSetupWithAlloc(config, alloc) @@ -229,13 +242,13 @@ func runAlgoTest(algo AlgoType, config *params.ChainConfig, alloc core.GenesisAl // build block switch algo { case ALGO_GREEDY_BUCKETS: - builder, err := newGreedyBucketsBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) + builder, err := newGreedyBucketsBuilder(chData.chain, chData.chainConfig, &algoConf, nil, env, nil, nil) if err != nil { return nil, err } resultEnv, _, _ = builder.buildBlock(bundles, nil, txPool) case ALGO_GREEDY: - builder, err := newGreedyBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) + builder, err := newGreedyBuilder(chData.chain, chData.chainConfig, &algoConf, nil, env, nil, nil) if err != nil { return nil, err } From b04fd4b8a566959f96c79b53c2f05dd76d5d9959 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Thu, 27 Jul 2023 20:04:51 -0500 Subject: [PATCH 17/46] Rollback retry logic --- miner/algo_common.go | 46 ++++---------------------------------------- 1 file changed, 4 insertions(+), 42 deletions(-) diff --git a/miner/algo_common.go b/miner/algo_common.go index 795b45efd2..ccfc4eb767 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -300,12 +300,9 @@ func BuildMultiTxSnapBlock( algoConf algorithmConfig, orders *types.TransactionsByPriceAndNonce) ([]types.SimulatedBundle, []types.UsedSBundle, error) { - const retryLimit = 1 - var ( usedBundles []types.SimulatedBundle usedSbundles []types.UsedSBundle - retryMap = make(map[*types.TxWithMinerFee]int) orderFailed bool buildBlockErrors []error ) @@ -342,59 +339,24 @@ func BuildMultiTxSnapBlock( orders.Pop() if err != nil { log.Trace("Could not apply bundle", "bundle", bundle.OriginalBundle.Hash, "err", err) - - var e *lowProfitError - if errors.As(err, &e) { - if e.ActualEffectiveGasPrice != nil { - order.SetPrice(e.ActualEffectiveGasPrice) - } - - if e.ActualProfit != nil { - order.SetProfit(e.ActualProfit) - } - - CheckRetryOrderAndReinsert(order, orders, retryMap, retryLimit) - } buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to commit bundle: %w", err)) orderFailed = true } else { usedBundles = append(usedBundles, *bundle) } } else if sbundle := order.SBundle(); sbundle != nil { + err = changes.CommitSBundle(sbundle, chData, key, algoConf) usedEntry := types.UsedSBundle{ - Bundle: sbundle.Bundle, + Bundle: sbundle.Bundle, + Success: err == nil, } - err = 
changes.CommitSBundle(sbundle, chData, key, algoConf) - var ( - success = err == nil - canAppend = true // only append if we are not retrying the bundle - ) if err != nil { log.Trace("Could not apply sbundle", "bundle", sbundle.Bundle.Hash(), "err", err) - var e *lowProfitError - if errors.As(err, &e) { - if e.ActualEffectiveGasPrice != nil { - order.SetPrice(e.ActualEffectiveGasPrice) - } - - if e.ActualProfit != nil { - order.SetProfit(e.ActualProfit) - } - - // if the sbundle was not included due to low profit, we can retry the bundle - if ok := CheckRetryOrderAndReinsert(order, orders, retryMap, retryLimit); ok { - // don't append the sbundle to usedSbundles if we are retrying the bundle - canAppend = false - } - } buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to commit sbundle: %w", err)) orderFailed = true } - if canAppend { - usedEntry.Success = success - usedSbundles = append(usedSbundles, usedEntry) - } + usedSbundles = append(usedSbundles, usedEntry) } else { // note: this should never happen because we should not be inserting invalid transaction types into // the orders heap From 597534fd08a477c0e11f02109413e12ecb5c188b Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Thu, 27 Jul 2023 20:45:52 -0500 Subject: [PATCH 18/46] Fix linter error --- miner/algo_common.go | 1 - 1 file changed, 1 deletion(-) diff --git a/miner/algo_common.go b/miner/algo_common.go index ccfc4eb767..2e0b95dbbb 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -299,7 +299,6 @@ func BuildMultiTxSnapBlock( chData chainData, algoConf algorithmConfig, orders *types.TransactionsByPriceAndNonce) ([]types.SimulatedBundle, []types.UsedSBundle, error) { - var ( usedBundles []types.SimulatedBundle usedSbundles []types.UsedSBundle From f282589b33ea60d86b695739ed9a674505916cbe Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Mon, 31 Jul 2023 12:47:49 -0500 Subject: [PATCH 19/46] Change account touch tracer to access list tracer for env changes --- miner/env_changes.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/miner/env_changes.go b/miner/env_changes.go index 47a3ae98f1..723b402377 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -86,7 +86,9 @@ func (c *envChanges) commitTx(tx *types.Transaction, chData chainData) (*types.R } cfg := *chData.chain.GetVMConfig() - touchTracer := logger.NewAccountTouchTracer() + // we set precompile to nil, but they are set in the validation code + // there will be no difference in the result if precompile is not it the blocklist + touchTracer := logger.NewAccessListTracer(nil, common.Address{}, common.Address{}, nil) cfg.Tracer = touchTracer cfg.Debug = true @@ -128,8 +130,8 @@ func (c *envChanges) commitTx(tx *types.Transaction, chData chainData) (*types.R } } - for _, address := range touchTracer.TouchedAddresses() { - if _, in := chData.blacklist[address]; in { + for _, accessTuple := range touchTracer.AccessList() { + if _, in := chData.blacklist[accessTuple.Address]; in { c.rollback(usedGasBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) return nil, popTx, errors.New("blacklist violation, tx trace") } From bacd7e480ee2af094e748f262b68e72aed0ebdfa Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Mon, 31 Jul 2023 14:03:39 -0500 Subject: [PATCH 20/46] Update greedy builder to use passed in algorithm configuration rather than default --- miner/algo_greedy.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/miner/algo_greedy.go b/miner/algo_greedy.go index 
9c3c76b2ce..7650563ff1 100644 --- a/miner/algo_greedy.go +++ b/miner/algo_greedy.go @@ -106,7 +106,7 @@ func (b *greedyBuilder) mergeOrdersIntoEnvDiff( } } else if bundle := order.Bundle(); bundle != nil { //log.Debug("buildBlock considering bundle", "egp", bundle.MevGasPrice.String(), "hash", bundle.OriginalBundle.Hash) - err := envDiff.commitBundle(bundle, b.chainData, b.interrupt, defaultAlgorithmConfig) + err := envDiff.commitBundle(bundle, b.chainData, b.interrupt, b.algoConf) orders.Pop() if err != nil { log.Trace("Could not apply bundle", "bundle", bundle.OriginalBundle.Hash, "err", err) @@ -119,7 +119,7 @@ func (b *greedyBuilder) mergeOrdersIntoEnvDiff( usedEntry := types.UsedSBundle{ Bundle: sbundle.Bundle, } - err := envDiff.commitSBundle(sbundle, b.chainData, b.interrupt, b.builderKey, defaultAlgorithmConfig) + err := envDiff.commitSBundle(sbundle, b.chainData, b.interrupt, b.builderKey, b.algoConf) orders.Pop() if err != nil { log.Trace("Could not apply sbundle", "bundle", sbundle.Bundle.Hash(), "err", err) From 49ba79b6795092879ca6272a26ece2b23022adae Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Mon, 31 Jul 2023 14:50:59 -0500 Subject: [PATCH 21/46] Rollback profit for env changes --- miner/env_changes.go | 1 + 1 file changed, 1 insertion(+) diff --git a/miner/env_changes.go b/miner/env_changes.go index 723b402377..93bc4dd6a1 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -402,6 +402,7 @@ func (c *envChanges) rollback( c.gasPool = gasPoolBefore c.txs = txsBefore c.receipts = receiptsBefore + c.profit.Set(profitBefore) } func (c *envChanges) apply() error { From d8cf812bf88fa7155555c5f42e0a0a38437813ca Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Mon, 31 Jul 2023 17:01:06 -0500 Subject: [PATCH 22/46] Fix bugs --- cmd/utils/flags.go | 9 ++++----- eth/backend.go | 5 ++++- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 9e1bd014eb..f78b016fb7 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -582,7 +582,6 @@ var ( MinerBlocklistFileFlag = &cli.StringFlag{ Name: "miner.blocklist", Usage: "[NOTE: Deprecated, please use builder.blacklist] flashbots - Path to JSON file with list of blocked addresses. Miner will ignore txs that touch mentioned addresses.", - Value: "", Category: flags.MinerCategory, } MinerNewPayloadTimeout = &cli.DurationFlag{ @@ -705,7 +704,6 @@ var ( BuilderAlgoTypeFlag = &cli.StringFlag{ Name: "builder.algotype", Usage: "Block building algorithm to use [=mev-geth] (mev-geth, greedy, greedy-buckets)", - Value: "mev-geth", Category: flags.BuilderCategory, } @@ -734,7 +732,6 @@ var ( Usage: "Path to file containing blacklisted addresses, json-encoded list of strings. " + "Builder will ignore transactions that touch mentioned addresses. This flag is also used for block validation API.\n" + "NOTE: builder.validation_blacklist is deprecated and will be removed in the future in favor of builder.blacklist", - Value: "", Aliases: []string{"builder.validation_blacklist"}, Category: flags.BuilderCategory, } @@ -1676,7 +1673,9 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) { // SetBuilderConfig applies node-related command line flags to the builder config. 
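// Note on the change below (assuming urfave/cli v2 semantics): ctx.IsSet only reports whether
// the flag was supplied on the command line, while ctx.Bool returns its parsed value. Assigning
// IsSet directly would enable the builder even when the flag is explicitly set to false, and
// would overwrite any previously configured cfg.Enabled when the flag is omitted; guarding on
// IsSet and reading ctx.Bool avoids both problems.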
func SetBuilderConfig(ctx *cli.Context, cfg *builder.Config) { - cfg.Enabled = ctx.IsSet(BuilderEnabled.Name) + if ctx.IsSet(BuilderEnabled.Name) { + cfg.Enabled = ctx.Bool(BuilderEnabled.Name) + } cfg.EnableValidatorChecks = ctx.IsSet(BuilderEnableValidatorChecks.Name) cfg.EnableLocalRelay = ctx.IsSet(BuilderEnableLocalRelay.Name) cfg.SlotsInEpoch = ctx.Uint64(BuilderSlotsInEpoch.Name) @@ -1944,7 +1943,7 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { // NOTE: This flag takes precedence and will overwrite value set by MinerBlocklistFileFlag if ctx.IsSet(BuilderBlockValidationBlacklistSourceFilePath.Name) { - bytes, err := os.ReadFile(ctx.String(MinerBlocklistFileFlag.Name)) + bytes, err := os.ReadFile(ctx.String(BuilderBlockValidationBlacklistSourceFilePath.Name)) if err != nil { Fatalf("Failed to read blocklist file: %s", err) } diff --git a/eth/backend.go b/eth/backend.go index 0b68f292c2..b330f005cb 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -231,7 +231,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } eth.miner = miner.New(eth, &config.Miner, eth.blockchain.Config(), eth.EventMux(), eth.engine, eth.isLocalBlock) - eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData)) + err = eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData)) + if err != nil { + return nil, err + } eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil} if eth.APIBackend.allowUnprotectedTxs { From 4317b0a600716b841d8ea5b55f3dafd1e9415b2d Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Tue, 1 Aug 2023 22:25:26 -0500 Subject: [PATCH 23/46] Add new multi-transaction snapshot stack to support more than one active snapshot, useful for cases like nested bundle applying and rollback, optional bundle discard, and bundle merging --- core/state/multi_tx_snapshot.go | 274 ++++++++++++++++++++++++++- core/state/multi_tx_snapshot_test.go | 19 +- core/state/state_object.go | 10 +- core/state/statedb.go | 96 ++++++---- miner/env_changes.go | 7 +- 5 files changed, 357 insertions(+), 49 deletions(-) diff --git a/core/state/multi_tx_snapshot.go b/core/state/multi_tx_snapshot.go index ff927927be..b59735b960 100644 --- a/core/state/multi_tx_snapshot.go +++ b/core/state/multi_tx_snapshot.go @@ -1,8 +1,11 @@ package state import ( + "errors" "math/big" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/common" ) @@ -30,7 +33,12 @@ type MultiTxSnapshot struct { // NewMultiTxSnapshot creates a new MultiTxSnapshot func NewMultiTxSnapshot() *MultiTxSnapshot { - return &MultiTxSnapshot{ + multiTxSnapshot := newMultiTxSnapshot() + return &multiTxSnapshot +} + +func newMultiTxSnapshot() MultiTxSnapshot { + return MultiTxSnapshot{ numLogsAdded: make(map[common.Hash]int), prevObjects: make(map[common.Address]*stateObject), accountStorage: make(map[common.Address]map[common.Hash]*common.Hash), @@ -137,10 +145,10 @@ func (s *MultiTxSnapshot) updatePendingStorage(address common.Address, key, valu if s.objectChanged(address) { return } - if _, ok := s.accountStorage[address]; !ok { + if _, exists := s.accountStorage[address]; !exists { s.accountStorage[address] = make(map[common.Hash]*common.Hash) } - if _, ok := s.accountStorage[address][key]; ok { + if _, exists := s.accountStorage[address][key]; exists { return } if ok { @@ -170,6 +178,105 @@ func (s *MultiTxSnapshot) updateObjectDeleted(address common.Address, deleted bo } } +// Merge merges the changes from another snapshot into the current snapshot. 
+// The operation assumes that the other snapshot is later (newer) than the current snapshot. +// Changes are merged such that older state is retained and not overwritten. +// In other words, this method performs a union operation on two snapshots, where +// older values are retained and any new values are added to the current snapshot. +func (s *MultiTxSnapshot) Merge(other *MultiTxSnapshot) error { + if other.invalid || s.invalid { + return errors.New("failed to merge snapshots - invalid snapshot found") + } + + // each snapshot increments the number of logs per transaction hash + // when we merge snapshots, the number of logs added per transaction are appended to current snapshot + for txHash, numLogs := range other.numLogsAdded { + s.numLogsAdded[txHash] += numLogs + } + + // prevObjects contain mapping of address to state objects + // if the current snapshot has previous object for same address, retain previous object + // otherwise, add new object from other snapshot + for address, object := range other.prevObjects { + if _, exist := s.prevObjects[address]; !exist { + s.prevObjects[address] = object + } + } + + // merge account storage - + // we want to retain any existing storage values for a given account, + // update storage keys if they do not exist for a given account's storage, + // and update pending storage for accounts that don't already exist in current snapshot + for address, storage := range other.accountStorage { + for key, value := range storage { + if value == nil { + s.updatePendingStorage(address, key, types.EmptyCodeHash, false) + } else { + s.updatePendingStorage(address, key, common.BytesToHash(value.Bytes()), true) + } + } + } + + // add previous balance(s) for any addresses that don't exist in current snapshot + for address, balance := range other.accountBalance { + if _, exist := s.accountBalance[address]; !exist { + s.accountBalance[address] = balance + } + } + + // add previous nonce for accounts that don't exist in current snapshot + for address, nonce := range other.accountNonce { + if _, exist := s.accountNonce[address]; !exist { + s.accountNonce[address] = nonce + } + } + + // add previous code for accounts not found in current snapshot + for address, code := range other.accountCode { + if _, exist := s.accountCode[address]; !exist { + if _, found := other.accountCodeHash[address]; !found { + // every codeChange has code and code hash set - + // should never reach this point unless there is programming error + panic("snapshot merge found code but no code hash for account address") + } + + s.accountCode[address] = code + s.accountCodeHash[address] = other.accountCodeHash[address] + } + } + + // add previous suicide for addresses not in current snapshot + for address, suicided := range other.accountSuicided { + if _, exist := s.accountSuicided[address]; !exist { + s.accountSuicided[address] = suicided + } else { + return errors.New("failed to merge snapshots - duplicate found for account suicide") + } + } + + // add previous account deletions if they don't exist + for address, deleted := range other.accountDeleted { + if _, exist := s.accountDeleted[address]; !exist { + s.accountDeleted[address] = deleted + } + } + + // add previous pending status if not found + for address := range other.accountNotPending { + if _, exist := s.accountNotPending[address]; !exist { + s.accountNotPending[address] = struct{}{} + } + } + + for address := range other.accountNotDirty { + if _, exist := s.accountNotDirty[address]; !exist { + s.accountNotDirty[address] = struct{}{} 
+ } + } + + return nil +} + // revertState reverts the state to the snapshot. func (s *MultiTxSnapshot) revertState(st *StateDB) { // remove all the logs added @@ -232,3 +339,164 @@ func (s *MultiTxSnapshot) revertState(st *StateDB) { delete(st.stateObjectsDirty, address) } } + +// MultiTxSnapshotStack contains a list of snapshots for multiple transactions associated with a StateDB. +// Intended use is as follows: +// - Create a new snapshot and push on top of the stack +// - Apply transactions to state and update head snapshot with changes from journal +// - If any changes applied to state database are committed to trie, invalidate the head snapshot +// - If applied changes are not desired, revert the changes from the head snapshot and pop the snapshot from the stack +// - If applied changes are desired, commit the changes from the head snapshot by merging with previous entry +// and pop the snapshot from the stack +type MultiTxSnapshotStack struct { + snapshots []MultiTxSnapshot + state *StateDB +} + +// NewMultiTxSnapshotStack creates a new MultiTxSnapshotStack with a given StateDB. +func NewMultiTxSnapshotStack(state *StateDB) *MultiTxSnapshotStack { + return &MultiTxSnapshotStack{ + snapshots: make([]MultiTxSnapshot, 0), + state: state, + } +} + +// NewSnapshot creates a new snapshot and pushes it on top of the stack. +func (stack *MultiTxSnapshotStack) NewSnapshot() (*MultiTxSnapshot, error) { + if len(stack.snapshots) > 0 && stack.snapshots[len(stack.snapshots)-1].invalid { + return nil, errors.New("failed to create new multi-transaction snapshot - invalid snapshot found at head") + } + + snap := newMultiTxSnapshot() + stack.snapshots = append(stack.snapshots, snap) + return &snap, nil +} + +// Peek returns the snapshot at the top of the stack. +func (stack *MultiTxSnapshotStack) Peek() *MultiTxSnapshot { + if len(stack.snapshots) == 0 { + return nil + } + return &stack.snapshots[len(stack.snapshots)-1] +} + +// Pop removes the snapshot at the top of the stack and returns it. +func (stack *MultiTxSnapshotStack) Pop() (*MultiTxSnapshot, error) { + size := len(stack.snapshots) + if size == 0 { + return nil, errors.New("failed to revert multi-transaction snapshot - does not exist") + } + + head := &stack.snapshots[size-1] + stack.snapshots = stack.snapshots[:size-1] + return head, nil +} + +// Revert rewinds the changes from the head snapshot and removes it from the stack. +func (stack *MultiTxSnapshotStack) Revert() (*MultiTxSnapshot, error) { + size := len(stack.snapshots) + if size == 0 { + return nil, errors.New("failed to revert multi-transaction snapshot - does not exist") + } + + head := &stack.snapshots[size-1] + if head.invalid { + return nil, errors.New("failed to revert multi-transaction snapshot - invalid snapshot found") + } + + head.revertState(stack.state) + stack.snapshots = stack.snapshots[:size-1] + return head, nil +} + +// Commit merges the changes from the head snapshot with the previous snapshot and removes it from the stack. 
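// A compact sketch of the intended lifecycle (illustrative only; error handling elided, and
// statedb is the owning *StateDB, whose Finalise feeds journal changes into the head snapshot):
//
//	stack := statedb.multiTxSnapshotStack
//	_, _ = stack.NewSnapshot()   // checkpoint before applying a bundle
//	// ... apply txs, calling statedb.Finalise(true) after each one ...
//	_, _ = stack.NewSnapshot()   // nested checkpoint, e.g. for an inner bundle
//	// ... apply more txs ...
//	_, _ = stack.Revert()        // undo everything recorded since the nested checkpoint
//	_, _ = stack.Commit()        // keep the outer changes; a lone head snapshot is simply dropped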
+func (stack *MultiTxSnapshotStack) Commit() (*MultiTxSnapshot, error) { + if len(stack.snapshots) == 0 { + return nil, errors.New("failed to commit multi-transaction snapshot - does not exist") + } + + if len(stack.snapshots) == 1 { + return stack.Pop() + } + + var ( + head *MultiTxSnapshot + err error + ) + if head, err = stack.Pop(); err != nil { + return nil, err + } + + current := stack.Peek() + if err = current.Merge(head); err != nil { + return nil, err + } + + stack.snapshots[len(stack.snapshots)-1] = *current + return head, nil +} + +// Size returns the number of snapshots in the stack. +func (stack *MultiTxSnapshotStack) Size() int { + return len(stack.snapshots) +} + +// Invalidate invalidates the latest snapshot. This is used when state changes are committed to trie. +func (stack *MultiTxSnapshotStack) Invalidate() { + // TODO: if latest snapshot is invalid, then all previous snapshots + // would also be invalidated, need to update logic to reflect that + size := len(stack.snapshots) + if size == 0 { + return + } + + head := stack.snapshots[size-1] + head.invalid = true + stack.snapshots = stack.snapshots[:0] + stack.snapshots = append(stack.snapshots, head) + //stack.snapshots[size-1].invalid = true +} + +// UpdatePendingStatus updates the pending status for an address. +func (stack *MultiTxSnapshotStack) UpdatePendingStatus(address common.Address, pending, dirty bool) { + if len(stack.snapshots) == 0 { + return + } + + current := stack.Peek() + current.updatePendingStatus(address, pending, dirty) + stack.snapshots[len(stack.snapshots)-1] = *current +} + +// UpdatePendingStorage updates the pending storage for an address. +func (stack *MultiTxSnapshotStack) UpdatePendingStorage(address common.Address, key, value common.Hash, ok bool) { + if len(stack.snapshots) == 0 { + return + } + + current := stack.Peek() + current.updatePendingStorage(address, key, value, ok) + stack.snapshots[len(stack.snapshots)-1] = *current +} + +// UpdateFromJournal updates the snapshot with the changes from the journal. +func (stack *MultiTxSnapshotStack) UpdateFromJournal(journal *journal) { + if len(stack.snapshots) == 0 { + return + } + + current := stack.Peek() + current.updateFromJournal(journal) + stack.snapshots[len(stack.snapshots)-1] = *current +} + +// UpdateObjectDeleted updates the snapshot with the object deletion. +func (stack *MultiTxSnapshotStack) UpdateObjectDeleted(address common.Address, deleted bool) { + if len(stack.snapshots) == 0 { + return + } + + current := stack.Peek() + current.updateObjectDeleted(address, deleted) + stack.snapshots[len(stack.snapshots)-1] = *current +} diff --git a/core/state/multi_tx_snapshot_test.go b/core/state/multi_tx_snapshot_test.go index 088b93c36d..4cf5ed2db2 100644 --- a/core/state/multi_tx_snapshot_test.go +++ b/core/state/multi_tx_snapshot_test.go @@ -18,8 +18,6 @@ var ( ) func init() { - rng = rand.New(rand.NewSource(0)) - for i := 0; i < 20; i++ { addrs = append(addrs, common.HexToAddress(fmt.Sprintf("0x%02x", i))) } @@ -141,6 +139,8 @@ func prepareInitialState(s *StateDB) { // for this we apply some changes // 1. Before calling intermediateRoot // 2. 
After calling intermediateRoot but before calling Finalise + rng = rand.New(rand.NewSource(0)) + var beforeCommitHooks, afterCommitHooks []func(addr common.Address, s *StateDB) addAccount := func(beforeCommit, afterCommit func(addr common.Address, s *StateDB)) { beforeCommitHooks = append(beforeCommitHooks, beforeCommit) @@ -241,7 +241,7 @@ func prepareInitialState(s *StateDB) { s.Finalise(true) } -func testMutliTxSnapshot(t *testing.T, actions func(s *StateDB)) { +func testMultiTxSnapshot(t *testing.T, actions func(s *StateDB)) { s := newStateTest() prepareInitialState(s.state) @@ -259,7 +259,8 @@ func testMutliTxSnapshot(t *testing.T, actions func(s *StateDB)) { dirtyAddressesBefore[k] = v } - err := s.state.MultiTxSnapshot() + //err := s.state.MultiTxSnapshot() + err := s.state.NewMultiTxSnapshot() if err != nil { t.Fatal("MultiTxSnapshot failed", err) } @@ -309,7 +310,7 @@ func testMutliTxSnapshot(t *testing.T, actions func(s *StateDB)) { } func TestMultiTxSnapshotAccountChangesSimple(t *testing.T) { - testMutliTxSnapshot(t, func(s *StateDB) { + testMultiTxSnapshot(t, func(s *StateDB) { for _, addr := range addrs { s.SetNonce(addr, 78) s.SetBalance(addr, big.NewInt(79)) @@ -320,7 +321,7 @@ func TestMultiTxSnapshotAccountChangesSimple(t *testing.T) { } func TestMultiTxSnapshotAccountChangesMultiTx(t *testing.T) { - testMutliTxSnapshot(t, func(s *StateDB) { + testMultiTxSnapshot(t, func(s *StateDB) { for _, addr := range addrs { s.SetNonce(addr, 78) s.SetBalance(addr, big.NewInt(79)) @@ -338,7 +339,7 @@ func TestMultiTxSnapshotAccountChangesMultiTx(t *testing.T) { } func TestMultiTxSnapshotAccountChangesSelfDestruct(t *testing.T) { - testMutliTxSnapshot(t, func(s *StateDB) { + testMultiTxSnapshot(t, func(s *StateDB) { for _, addr := range addrs { s.SetNonce(addr, 78) s.SetBalance(addr, big.NewInt(79)) @@ -361,7 +362,7 @@ func TestMultiTxSnapshotAccountChangesSelfDestruct(t *testing.T) { } func TestMultiTxSnapshotAccountChangesEmptyAccount(t *testing.T) { - testMutliTxSnapshot(t, func(s *StateDB) { + testMultiTxSnapshot(t, func(s *StateDB) { for _, addr := range addrs { s.SetNonce(addr, 78) s.SetBalance(addr, big.NewInt(79)) @@ -386,7 +387,7 @@ func TestMultiTxSnapshotAccountChangesEmptyAccount(t *testing.T) { } func TestMultiTxSnapshotStateChanges(t *testing.T) { - testMutliTxSnapshot(t, func(s *StateDB) { + testMultiTxSnapshot(t, func(s *StateDB) { for _, addr := range addrs { randFillAccountState(addr, s) } diff --git a/core/state/state_object.go b/core/state/state_object.go index 1f74ee606d..f47484c8d1 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -247,10 +247,12 @@ func (s *stateObject) setState(key, value common.Hash) { func (s *stateObject) finalise(prefetch bool) { slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) for key, value := range s.dirtyStorage { - if multiSnap := s.db.multiTxSnapshot; multiSnap != nil { - prev, ok := s.pendingStorage[key] - multiSnap.updatePendingStorage(s.address, key, prev, ok) - } + prev, ok := s.pendingStorage[key] + s.db.multiTxSnapshotStack.UpdatePendingStorage(s.address, key, prev, ok) + //if multiSnap := s.db.multiTxSnapshot; multiSnap != nil { + // prev, ok := s.pendingStorage[key] + // multiSnap.updatePendingStorage(s.address, key, prev, ok) + //} s.pendingStorage[key] = value if value != s.originStorage[key] { slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure diff --git a/core/state/statedb.go b/core/state/statedb.go index 33684f95c2..e3befb8f42 100644 --- 
a/core/state/statedb.go +++ b/core/state/statedb.go @@ -110,7 +110,9 @@ type StateDB struct { nextRevisionId int // Multi-Transaction Snapshot - multiTxSnapshot *MultiTxSnapshot + //multiTxSnapshot *MultiTxSnapshot + // Multi-Transaction Snapshot Stack + multiTxSnapshotStack *MultiTxSnapshotStack // Measurements gathered during execution for debugging purposes AccountReads time.Duration @@ -154,6 +156,8 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) transientStorage: newTransientStorage(), hasher: crypto.NewKeccakState(), } + + sdb.multiTxSnapshotStack = NewMultiTxSnapshotStack(sdb) if sdb.snaps != nil { if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil { sdb.snapAccounts = make(map[common.Hash][]byte) @@ -715,6 +719,7 @@ func (s *StateDB) Copy() *StateDB { journal: newJournal(), hasher: crypto.NewKeccakState(), } + state.multiTxSnapshotStack = NewMultiTxSnapshotStack(state) // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), @@ -845,9 +850,10 @@ func (s *StateDB) GetRefund() uint64 { // the journal as well as the refunds. Finalise, however, will not push any updates // into the tries just yet. Only IntermediateRoot or Commit will do that. func (s *StateDB) Finalise(deleteEmptyObjects bool) { - if multiSnap := s.multiTxSnapshot; multiSnap != nil { - multiSnap.updateFromJournal(s.journal) - } + s.multiTxSnapshotStack.UpdateFromJournal(s.journal) + //if multiSnap := s.multiTxSnapshot; multiSnap != nil { + // multiSnap.updateFromJournal(s.journal) + //} addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) for addr := range s.journal.dirties { obj, exist := s.stateObjects[addr] @@ -861,9 +867,10 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { continue } if obj.suicided || (deleteEmptyObjects && obj.empty()) { - if multiSnap := s.multiTxSnapshot; multiSnap != nil { - multiSnap.updateObjectDeleted(obj.address, obj.deleted) - } + s.multiTxSnapshotStack.UpdateObjectDeleted(obj.address, obj.deleted) + //if multiSnap := s.multiTxSnapshot; multiSnap != nil { + // multiSnap.updateObjectDeleted(obj.address, obj.deleted) + //} obj.deleted = true @@ -882,11 +889,17 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { } else { obj.finalise(true) // Prefetch slots in the background } - if multiSnap := s.multiTxSnapshot; multiSnap != nil { + + if s.multiTxSnapshotStack.Size() > 0 { _, wasPending := s.stateObjectsPending[addr] _, wasDirty := s.stateObjectsDirty[addr] - multiSnap.updatePendingStatus(addr, wasPending, wasDirty) + s.multiTxSnapshotStack.UpdatePendingStatus(addr, wasPending, wasDirty) } + //if multiSnap := s.multiTxSnapshot; multiSnap != nil { + // _, wasPending := s.stateObjectsPending[addr] + // _, wasDirty := s.stateObjectsDirty[addr] + //multiSnap.updatePendingStatus(addr, wasPending, wasDirty) + //} s.stateObjectsPending[addr] = struct{}{} s.stateObjectsDirty[addr] = struct{}{} @@ -909,9 +922,10 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Finalise all the dirty storage states and write them into the tries s.Finalise(deleteEmptyObjects) - if s.multiTxSnapshot != nil { - s.multiTxSnapshot.invalid = true - } + s.multiTxSnapshotStack.Invalidate() + //if s.multiTxSnapshot != nil { + // s.multiTxSnapshot.invalid = true + //} // If there was a trie prefetcher operating, it gets aborted and irrevocably // modified after we start retrieving tries. 
Remove it from the statedb after @@ -1201,31 +1215,49 @@ func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common. return ret } -// MultiTxSnapshot creates new checkpoint for multi txs reverts -func (s *StateDB) MultiTxSnapshot() error { - if s.multiTxSnapshot != nil { - return errors.New("multi tx snapshot already exists") +func (s *StateDB) NewMultiTxSnapshot() error { + _, err := s.multiTxSnapshotStack.NewSnapshot() + if err != nil { + return err } - s.multiTxSnapshot = NewMultiTxSnapshot() return nil } +// MultiTxSnapshot creates new checkpoint for multi txs reverts +//func (s *StateDB) MultiTxSnapshot() error { +// if s.multiTxSnapshot != nil { +// return errors.New("multi tx snapshot already exists") +// } +// s.multiTxSnapshot = NewMultiTxSnapshot() +// return nil +//} + func (s *StateDB) MultiTxSnapshotRevert() error { - if s.multiTxSnapshot == nil { - return errors.New("multi tx snapshot does not exist") - } - if s.multiTxSnapshot.invalid { - return errors.New("multi tx snapshot is invalid") - } - s.multiTxSnapshot.revertState(s) - s.multiTxSnapshot = nil - return nil -} + _, err := s.multiTxSnapshotStack.Revert() + return err +} + +//func (s *StateDB) MultiTxSnapshotRevert() error { +// if s.multiTxSnapshot == nil { +// return errors.New("multi tx snapshot does not exist") +// } +// if s.multiTxSnapshot.invalid { +// return errors.New("multi tx snapshot is invalid") +// } +// s.multiTxSnapshot.revertState(s) +// s.multiTxSnapshot = nil +// return nil +//} func (s *StateDB) MultiTxSnapshotDiscard() error { - if s.multiTxSnapshot == nil { - return errors.New("multi tx snapshot does not exist") - } - s.multiTxSnapshot = nil - return nil + _, err := s.multiTxSnapshotStack.Commit() + return err } + +//func (s *StateDB) MultiTxSnapshotDiscard() error { +// if s.multiTxSnapshot == nil { +// return errors.New("multi tx snapshot does not exist") +// } +// s.multiTxSnapshot = nil +// return nil +//} diff --git a/miner/env_changes.go b/miner/env_changes.go index 93bc4dd6a1..d90e9583f8 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -24,9 +24,12 @@ type envChanges struct { } func newEnvChanges(env *environment) (*envChanges, error) { - if err := env.state.MultiTxSnapshot(); err != nil { + if err := env.state.NewMultiTxSnapshot(); err != nil { return nil, err } + //if err := env.state.MultiTxSnapshot(); err != nil { + // return nil, err + //} return &envChanges{ env: env, @@ -158,6 +161,8 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat ) for _, tx := range bundle.OriginalBundle.Txs { + // TODO: Checks for base fee and dynamic fee txs should be moved to the transaction pool, + // similar to mev-share bundles. See SBundlesPool.validateTx() for reference. 
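// A hypothetical pool-side guard (sketch only; the function name is assumed) could perform this
// kind of sanity check at admission time, using the standard EIP-1559 errors from the core package:
//
//	func validateDynamicFeeTx(tx *types.Transaction) error {
//		if tx.GasFeeCap().BitLen() > 256 {
//			return core.ErrFeeCapVeryHigh // fee cap must fit in 256 bits
//		}
//		if tx.GasTipCap().BitLen() > 256 {
//			return core.ErrTipVeryHigh // tip must fit in 256 bits
//		}
//		if tx.GasFeeCap().Cmp(tx.GasTipCap()) < 0 {
//			return core.ErrTipAboveFeeCap // tip may not exceed the fee cap
//		}
//		return nil
//	}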
if hasBaseFee && tx.Type() == types.DynamicFeeTxType { // Sanity check for extremely large numbers if tx.GasFeeCap().BitLen() > 256 { From 0e3057b35aab79887c983bf728817b3ff50e0d92 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Fri, 4 Aug 2023 13:43:10 -0500 Subject: [PATCH 24/46] Clean up code, add comprehensive stack tests with fuzzing, fix edge cases where merge operation for stack commit was not properly updated --- core/state/multi_tx_snapshot.go | 110 +++++- core/state/multi_tx_snapshot_test.go | 502 ++++++++++++++++++++++++++- core/state/state_object.go | 5 +- core/state/statedb.go | 52 +-- miner/env_changes.go | 5 +- 5 files changed, 612 insertions(+), 62 deletions(-) diff --git a/core/state/multi_tx_snapshot.go b/core/state/multi_tx_snapshot.go index b59735b960..abce576519 100644 --- a/core/state/multi_tx_snapshot.go +++ b/core/state/multi_tx_snapshot.go @@ -2,9 +2,9 @@ package state import ( "errors" + "fmt" "math/big" - - "github.com/ethereum/go-ethereum/core/types" + "reflect" "github.com/ethereum/go-ethereum/common" ) @@ -53,6 +53,63 @@ func newMultiTxSnapshot() MultiTxSnapshot { } } +// Equal returns true if the two MultiTxSnapshot are equal +func (s *MultiTxSnapshot) Equal(other *MultiTxSnapshot) bool { + if other == nil { + return false + } + if s.invalid != other.invalid { + return false + } + + visited := make(map[common.Address]bool) + for address, obj := range other.prevObjects { + current, exist := s.prevObjects[address] + if !exist { + return false + } + if current == nil && obj != nil { + return false + } + + if current != nil && obj == nil { + return false + } + + visited[address] = true + } + + for address, obj := range s.prevObjects { + if visited[address] { + continue + } + + otherObject, exist := other.prevObjects[address] + if !exist { + return false + } + + if otherObject == nil && obj != nil { + return false + } + + if otherObject != nil && obj == nil { + return false + } + } + + return reflect.DeepEqual(s.numLogsAdded, other.numLogsAdded) && + reflect.DeepEqual(s.accountStorage, other.accountStorage) && + reflect.DeepEqual(s.accountBalance, other.accountBalance) && + reflect.DeepEqual(s.accountNonce, other.accountNonce) && + reflect.DeepEqual(s.accountCode, other.accountCode) && + reflect.DeepEqual(s.accountCodeHash, other.accountCodeHash) && + reflect.DeepEqual(s.accountSuicided, other.accountSuicided) && + reflect.DeepEqual(s.accountDeleted, other.accountDeleted) && + reflect.DeepEqual(s.accountNotPending, other.accountNotPending) && + reflect.DeepEqual(s.accountNotDirty, other.accountNotDirty) +} + // updateFromJournal updates the snapshot with the changes from the journal. 
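// (Clarifying note, roughly: each journal entry stores the value it overwrote; the loop below
// keeps, per account, the earliest previous balance/nonce/code/suicide flag it encounters, plus
// a count of logs added per transaction hash, so that revertState can later restore the
// pre-run values of a whole run of transactions in a single step.)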
func (s *MultiTxSnapshot) updateFromJournal(journal *journal) { for _, journalEntry := range journal.entries { @@ -208,17 +265,29 @@ func (s *MultiTxSnapshot) Merge(other *MultiTxSnapshot) error { // update storage keys if they do not exist for a given account's storage, // and update pending storage for accounts that don't already exist in current snapshot for address, storage := range other.accountStorage { + if s.objectChanged(address) { + continue + } + + if _, exist := s.accountStorage[address]; !exist { + s.accountStorage[address] = make(map[common.Hash]*common.Hash) + s.accountStorage[address] = storage + continue + } + for key, value := range storage { - if value == nil { - s.updatePendingStorage(address, key, types.EmptyCodeHash, false) - } else { - s.updatePendingStorage(address, key, common.BytesToHash(value.Bytes()), true) + if _, exists := s.accountStorage[address][key]; !exists { + s.accountStorage[address][key] = value } } } // add previous balance(s) for any addresses that don't exist in current snapshot for address, balance := range other.accountBalance { + if s.objectChanged(address) { + continue + } + if _, exist := s.accountBalance[address]; !exist { s.accountBalance[address] = balance } @@ -226,6 +295,9 @@ func (s *MultiTxSnapshot) Merge(other *MultiTxSnapshot) error { // add previous nonce for accounts that don't exist in current snapshot for address, nonce := range other.accountNonce { + if s.objectChanged(address) { + continue + } if _, exist := s.accountNonce[address]; !exist { s.accountNonce[address] = nonce } @@ -233,6 +305,9 @@ func (s *MultiTxSnapshot) Merge(other *MultiTxSnapshot) error { // add previous code for accounts not found in current snapshot for address, code := range other.accountCode { + if s.objectChanged(address) { + continue + } if _, exist := s.accountCode[address]; !exist { if _, found := other.accountCodeHash[address]; !found { // every codeChange has code and code hash set - @@ -247,6 +322,10 @@ func (s *MultiTxSnapshot) Merge(other *MultiTxSnapshot) error { // add previous suicide for addresses not in current snapshot for address, suicided := range other.accountSuicided { + if s.objectChanged(address) { + continue + } + if _, exist := s.accountSuicided[address]; !exist { s.accountSuicided[address] = suicided } else { @@ -256,6 +335,9 @@ func (s *MultiTxSnapshot) Merge(other *MultiTxSnapshot) error { // add previous account deletions if they don't exist for address, deleted := range other.accountDeleted { + if s.objectChanged(address) { + continue + } if _, exist := s.accountDeleted[address]; !exist { s.accountDeleted[address] = deleted } @@ -303,8 +385,14 @@ func (s *MultiTxSnapshot) revertState(st *StateDB) { for address, storage := range s.accountStorage { for key, value := range storage { if value == nil { + if _, ok := st.stateObjects[address].pendingStorage[key]; !ok { + panic(fmt.Sprintf("storage key %x not found in pending storage", key)) + } delete(st.stateObjects[address].pendingStorage, key) } else { + if _, ok := st.stateObjects[address].pendingStorage[key]; !ok { + panic(fmt.Sprintf("storage key %x not found in pending storage", key)) + } st.stateObjects[address].pendingStorage[key] = *value } } @@ -409,6 +497,16 @@ func (stack *MultiTxSnapshotStack) Revert() (*MultiTxSnapshot, error) { return head, nil } +// RevertAll reverts all snapshots in the stack. 
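// RevertAll below simply unwinds the stack newest-first by calling Revert until it is empty.
// The complementary Commit path relies on the merge rule documented above; as a worked example:
// if an older snapshot recorded balance(addr) = 10 as the value before tx1, and a newer snapshot
// later recorded balance(addr) = 15 as the value before tx2, merging the newer snapshot into the
// older one keeps 10, because reverting the merged snapshot must restore the state from before
// tx1; the older previous value wins and the newer one is dropped for that account.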
+func (stack *MultiTxSnapshotStack) RevertAll() (snapshot *MultiTxSnapshot, err error) { + for len(stack.snapshots) > 0 { + if snapshot, err = stack.Revert(); err != nil { + break + } + } + return +} + // Commit merges the changes from the head snapshot with the previous snapshot and removes it from the stack. func (stack *MultiTxSnapshotStack) Commit() (*MultiTxSnapshot, error) { if len(stack.snapshots) == 0 { diff --git a/core/state/multi_tx_snapshot_test.go b/core/state/multi_tx_snapshot_test.go index 4cf5ed2db2..50de20280d 100644 --- a/core/state/multi_tx_snapshot_test.go +++ b/core/state/multi_tx_snapshot_test.go @@ -5,6 +5,7 @@ import ( "fmt" "math/big" "math/rand" + "reflect" "testing" "github.com/ethereum/go-ethereum/common" @@ -21,7 +22,7 @@ func init() { for i := 0; i < 20; i++ { addrs = append(addrs, common.HexToAddress(fmt.Sprintf("0x%02x", i))) } - for i := 0; i < 10; i++ { + for i := 0; i < 100; i++ { keys = append(keys, common.HexToHash(fmt.Sprintf("0x%02x", i))) } } @@ -127,6 +128,25 @@ func randFillAccountState(addr common.Address, s *StateDB) { } } +func genRandomAccountState(seed int64) map[common.Address]map[common.Hash]common.Hash { + rng = rand.New(rand.NewSource(seed)) + + state := make(map[common.Address]map[common.Hash]common.Hash) + + for _, addr := range addrs { + state[addr] = make(map[common.Hash]common.Hash) + for i, key := range keys { + if i%5 == 0 { + state[addr][key] = common.BigToHash(common.Big0) + } else { + state[addr][key] = randomHash() + } + } + } + + return state +} + func randFillAccount(addr common.Address, s *StateDB) { s.SetNonce(addr, rng.Uint64()) s.SetBalance(addr, big.NewInt(rng.Int63())) @@ -259,7 +279,6 @@ func testMultiTxSnapshot(t *testing.T, actions func(s *StateDB)) { dirtyAddressesBefore[k] = v } - //err := s.state.MultiTxSnapshot() err := s.state.NewMultiTxSnapshot() if err != nil { t.Fatal("MultiTxSnapshot failed", err) @@ -399,3 +418,482 @@ func TestMultiTxSnapshotStateChanges(t *testing.T) { s.Finalise(true) }) } + +func testStackBasic(t *testing.T) { + for i := 0; i < 10; i++ { + testMultiTxSnapshot(t, func(s *StateDB) { + // when test starts, actions are performed after new snapshot is created + // we initialize additional snapshot on top of that + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("NewMultiTxSnapshot failed: %v", err) + t.FailNow() + } + + seed := rand.Int63() + stateMap := genRandomAccountState(seed) + for account, accountKeys := range stateMap { + for key, value := range accountKeys { + s.SetState(account, key, value) + } + } + s.Finalise(true) + + stack := s.multiTxSnapshotStack + + // the test starts with 1 snapshot, and we just created new one above + startSize := stack.Size() + if startSize != 2 { + t.Errorf("expected stack size to be 2, got %d", startSize) + t.FailNow() + } + + for _, addr := range addrs { + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("NewMultiTxSnapshot failed: %v", err) + t.FailNow() + } + randFillAccountState(addr, s) + s.Finalise(true) + } + afterAddrSize := stack.Size() + if afterAddrSize != startSize+len(addrs) { + t.Errorf("expected stack size to be %d, got %d", startSize+len(addrs), afterAddrSize) + t.FailNow() + } + + // the testMultiTxSnapshot subroutine calls MultiTxSnapshotRevert after applying actions + // we test here to make sure that the flattened commitments on the head of stack + // yield the same final root hash + // this ensures that we are properly flattening the stack on commit + for stack.Size() > 1 { + if _, err := stack.Commit(); err != nil 
{ + t.Errorf("Commit failed: %v", err) + t.FailNow() + } + } + }) + } +} + +func testStackSelfDestruct(t *testing.T) { + testMultiTxSnapshot(t, func(s *StateDB) { + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("NewMultiTxSnapshot failed: %v", err) + t.FailNow() + } + for _, addr := range addrs { + s.SetNonce(addr, 78) + s.SetBalance(addr, big.NewInt(79)) + s.SetCode(addr, []byte{0x80}) + s.Finalise(true) + } + + for _, addr := range addrs { + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("NewMultiTxSnapshot failed: %v", err) + t.FailNow() + } + s.Suicide(addr) + } + stack := s.multiTxSnapshotStack + + // merge all the suicide operations + for stack.Size() > 1 { + if _, err := stack.Commit(); err != nil { + t.Errorf("Commit failed: %v", err) + t.FailNow() + } + } + s.Finalise(true) + + for _, addr := range addrs { + s.SetNonce(addr, 79) + s.SetBalance(addr, big.NewInt(80)) + s.SetCode(addr, []byte{0x81}) + } + s.Finalise(true) + }) +} + +func testStackAgainstSingleSnap(t *testing.T) { + // we generate a random seed ten times to fuzz test multiple stack snapshots against single layer snapshot + for i := 0; i < 10; i++ { + testMultiTxSnapshot(t, func(s *StateDB) { + original := s.Copy() + baselineStateDB := s.Copy() + + baselineRootHash, targetRootHash := baselineStateDB.originalRoot, s.originalRoot + + if !bytes.Equal(baselineRootHash.Bytes(), targetRootHash.Bytes()) { + t.Errorf("expected root hash to be %x, got %x", baselineRootHash, targetRootHash) + t.FailNow() + } + + // basic - add multiple snapshots and commit them, and compare them to single snapshot that has all + // state changes + + if err := baselineStateDB.NewMultiTxSnapshot(); err != nil { + t.Errorf("Error initializing snapshot: %v", err) + t.FailNow() + } + + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("Error initializing snapshot: %v", err) + t.FailNow() + } + + // we should be able to revert back to the same intermediate root hash + // for single snapshot and snapshot stack + seed := rand.Int63() + state := genRandomAccountState(seed) + for account, accountKeys := range state { + for key, value := range accountKeys { + baselineStateDB.SetState(account, key, value) + + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("Error initializing snapshot: %v", err) + t.FailNow() + } + s.SetState(account, key, value) + s.Finalise(true) + } + } + baselineStateDB.Finalise(true) + + // commit all but last snapshot + stack := s.multiTxSnapshotStack + for stack.Size() > 1 { + if _, err := stack.Commit(); err != nil { + t.Errorf("Commit failed: %v", err) + t.FailNow() + } + } + + var ( + baselineSnapshot = baselineStateDB.multiTxSnapshotStack.Peek() + targetSnapshot = s.multiTxSnapshotStack.Peek() + ) + if !targetSnapshot.Equal(baselineSnapshot) { + CompareAndPrintSnapshotMismatches(t, targetSnapshot, baselineSnapshot) + t.Errorf("expected snapshots to be equal") + t.FailNow() + } + + // revert back to previously calculated root hash + if err := baselineStateDB.MultiTxSnapshotRevert(); err != nil { + t.Errorf("MultiTxSnapshotRevert failed: %v", err) + t.FailNow() + } + + if err := s.MultiTxSnapshotRevert(); err != nil { + t.Errorf("MultiTxSnapshotRevert failed: %v", err) + t.FailNow() + } + + var err error + if targetRootHash, err = s.Commit(true); err != nil { + t.Errorf("Commit failed: %v", err) + t.FailNow() + } + + if baselineRootHash, err = baselineStateDB.Commit(true); err != nil { + t.Errorf("Commit failed: %v", err) + t.FailNow() + } + if !bytes.Equal(baselineRootHash.Bytes(), 
targetRootHash.Bytes()) { + t.Errorf("expected root hash to be %x, got %x", baselineRootHash, targetRootHash) + t.FailNow() + } + + *s = *original + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("Error initializing snapshot: %v", err) + t.FailNow() + } + }) + } +} + +func TestMultiTxSnapshotStack(t *testing.T) { + // test state changes are valid after merging snapshots + testStackBasic(t) + + // test self-destruct + testStackSelfDestruct(t) + + // test against baseline single snapshot + testStackAgainstSingleSnap(t) +} + +func CompareAndPrintSnapshotMismatches(t *testing.T, target, other *MultiTxSnapshot) { + var out bytes.Buffer + if target.Equal(other) { + t.Logf("Snapshots are equal") + return + } + + if target.invalid != other.invalid { + out.WriteString(fmt.Sprintf("invalid: %v != %v\n", target.invalid, other.invalid)) + return + } + + // check log mismatch + visited := make(map[common.Hash]bool) + for address, logCount := range other.numLogsAdded { + targetLogCount, exists := target.numLogsAdded[address] + if !exists { + out.WriteString(fmt.Sprintf("target<>other numLogsAdded[missing]: %v\n", address)) + continue + } + if targetLogCount != logCount { + out.WriteString(fmt.Sprintf("target<>other numLogsAdded[%x]: %v != %v\n", address, targetLogCount, logCount)) + } + } + + for address, logCount := range target.numLogsAdded { + if visited[address] { + continue + } + + otherLogCount, exists := other.numLogsAdded[address] + if !exists { + out.WriteString(fmt.Sprintf("other<>target numLogsAdded[missing]: %v\n", address)) + continue + } + + if otherLogCount != logCount { + out.WriteString(fmt.Sprintf("other<>target numLogsAdded[%x]: %v != %v\n", address, otherLogCount, logCount)) + } + } + + // check previous objects mismatch + for address := range other.prevObjects { + // TODO: we only check existence, need to add RLP comparison + _, exists := target.prevObjects[address] + if !exists { + out.WriteString(fmt.Sprintf("target<>other prevObjects[missing]: %v\n", address.String())) + continue + } + } + + for address, obj := range target.prevObjects { + otherObj, exists := other.prevObjects[address] + if !exists { + out.WriteString(fmt.Sprintf("other<>target prevObjects[missing]: %v\n", address)) + continue + } + if !reflect.DeepEqual(otherObj, obj) { + out.WriteString(fmt.Sprintf("other<>target prevObjects[%x]: %v != %v\n", address, otherObj, obj)) + } + } + + // check account storage mismatch + for account, storage := range other.accountStorage { + targetStorage, exists := target.accountStorage[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountStorage[missing]: %v\n", account)) + continue + } + + for key, value := range storage { + targetValue, exists := targetStorage[key] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountStorage[%s][missing]: %v\n", account.String(), key.String())) + continue + } + if !reflect.DeepEqual(targetValue, value) { + out.WriteString(fmt.Sprintf("target<>other accountStorage[%s][%s]: %v != %v\n", account.String(), key.String(), targetValue.String(), value.String())) + } + } + } + + for account, storage := range target.accountStorage { + otherStorage, exists := other.accountStorage[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountStorage[missing]: %v\n", account)) + continue + } + + for key, value := range storage { + otherValue, exists := otherStorage[key] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountStorage[%s][missing]: %v\n", account.String(), key.String())) + 
continue + } + if !reflect.DeepEqual(otherValue, value) { + out.WriteString(fmt.Sprintf("other<>target accountStorage[%s][%s]: %v != %v\n", account.String(), key.String(), otherValue.String(), value.String())) + } + } + } + + // check account balance mismatch + for account, balance := range other.accountBalance { + targetBalance, exists := target.accountBalance[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountBalance[missing]: %v\n", account)) + continue + } + if !reflect.DeepEqual(targetBalance, balance) { + out.WriteString(fmt.Sprintf("target<>other accountBalance[%x]: %v != %v\n", account, targetBalance, balance)) + } + } + + for account, balance := range target.accountBalance { + otherBalance, exists := other.accountBalance[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountBalance[missing]: %v\n", account)) + continue + } + if !bytes.Equal(otherBalance.Bytes(), balance.Bytes()) { + out.WriteString(fmt.Sprintf("other<>target accountBalance[%x]: %v != %v\n", account, otherBalance, balance)) + } + } + + // check account nonce mismatch + for account, nonce := range other.accountNonce { + targetNonce, exists := target.accountNonce[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountNonce[missing]: %v\n", account)) + continue + } + if targetNonce != nonce { + out.WriteString(fmt.Sprintf("target<>other accountNonce[%x]: %v != %v\n", account, targetNonce, nonce)) + } + } + + for account, nonce := range target.accountNonce { + otherNonce, exists := other.accountNonce[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountNonce[missing]: %v\n", account)) + continue + } + if otherNonce != nonce { + out.WriteString(fmt.Sprintf("other<>target accountNonce[%x]: %v != %v\n", account, otherNonce, nonce)) + } + } + + // check account code mismatch + for account, code := range other.accountCode { + targetCode, exists := target.accountCode[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountCode[missing]: %v\n", account)) + continue + } + if !bytes.Equal(targetCode, code) { + out.WriteString(fmt.Sprintf("target<>other accountCode[%x]: %v != %v\n", account, targetCode, code)) + } + } + + for account, code := range target.accountCode { + otherCode, exists := other.accountCode[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountCode[missing]: %v\n", account)) + continue + } + if !bytes.Equal(otherCode, code) { + out.WriteString(fmt.Sprintf("other<>target accountCode[%x]: %v != %v\n", account, otherCode, code)) + } + } + + // check account codeHash mismatch + for account, codeHash := range other.accountCodeHash { + targetCodeHash, exists := target.accountCodeHash[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountCodeHash[missing]: %v\n", account)) + continue + } + if !bytes.Equal(targetCodeHash, codeHash) { + out.WriteString(fmt.Sprintf("target<>other accountCodeHash[%x]: %v != %v\n", account, targetCodeHash, codeHash)) + } + } + + for account, codeHash := range target.accountCodeHash { + otherCodeHash, exists := other.accountCodeHash[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountCodeHash[missing]: %v\n", account)) + continue + } + if !bytes.Equal(otherCodeHash, codeHash) { + out.WriteString(fmt.Sprintf("other<>target accountCodeHash[%x]: %v != %v\n", account, otherCodeHash, codeHash)) + } + } + + // check account suicide mismatch + for account, suicide := range other.accountSuicided { + targetSuicide, exists 
:= target.accountSuicided[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountSuicided[missing]: %v\n", account)) + continue + } + + if targetSuicide != suicide { + out.WriteString(fmt.Sprintf("target<>other accountSuicided[%x]: %t != %t\n", account, targetSuicide, suicide)) + } + } + + for account, suicide := range target.accountSuicided { + otherSuicide, exists := other.accountSuicided[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountSuicided[missing]: %v\n", account)) + continue + } + + if otherSuicide != suicide { + out.WriteString(fmt.Sprintf("other<>target accountSuicided[%x]: %t != %t\n", account, otherSuicide, suicide)) + } + } + + // check account deletion mismatch + for account, del := range other.accountDeleted { + targetDelete, exists := target.accountDeleted[account] + if !exists { + out.WriteString(fmt.Sprintf("target<>other accountDeleted[missing]: %v\n", account)) + continue + } + + if targetDelete != del { + out.WriteString(fmt.Sprintf("target<>other accountDeleted[%x]: %v != %v\n", account, targetDelete, del)) + } + } + + for account, del := range target.accountDeleted { + otherDelete, exists := other.accountDeleted[account] + if !exists { + out.WriteString(fmt.Sprintf("other<>target accountDeleted[missing]: %v\n", account)) + continue + } + + if otherDelete != del { + out.WriteString(fmt.Sprintf("other<>target accountDeleted[%x]: %v != %v\n", account, otherDelete, del)) + } + } + + // check account not pending mismatch + for account := range other.accountNotPending { + if _, exists := target.accountNotPending[account]; !exists { + out.WriteString(fmt.Sprintf("target<>other accountNotPending[missing]: %v\n", account)) + } + } + + for account := range target.accountNotPending { + if _, exists := other.accountNotPending[account]; !exists { + out.WriteString(fmt.Sprintf("other<>target accountNotPending[missing]: %v\n", account)) + } + } + + // check account not dirty mismatch + for account := range other.accountNotDirty { + if _, exists := target.accountNotDirty[account]; !exists { + out.WriteString(fmt.Sprintf("target<>other accountNotDirty[missing]: %v\n", account)) + } + } + + for account := range target.accountNotDirty { + if _, exists := other.accountNotDirty[account]; !exists { + out.WriteString(fmt.Sprintf("other<>target accountNotDirty[missing]: %v\n", account)) + } + } + + fmt.Println(out.String()) + out.Reset() +} diff --git a/core/state/state_object.go b/core/state/state_object.go index f47484c8d1..cd720019db 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -249,10 +249,7 @@ func (s *stateObject) finalise(prefetch bool) { for key, value := range s.dirtyStorage { prev, ok := s.pendingStorage[key] s.db.multiTxSnapshotStack.UpdatePendingStorage(s.address, key, prev, ok) - //if multiSnap := s.db.multiTxSnapshot; multiSnap != nil { - // prev, ok := s.pendingStorage[key] - // multiSnap.updatePendingStorage(s.address, key, prev, ok) - //} + s.pendingStorage[key] = value if value != s.originStorage[key] { slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure diff --git a/core/state/statedb.go b/core/state/statedb.go index e3befb8f42..a062bd3793 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -109,8 +109,6 @@ type StateDB struct { validRevisions []revision nextRevisionId int - // Multi-Transaction Snapshot - //multiTxSnapshot *MultiTxSnapshot // Multi-Transaction Snapshot Stack multiTxSnapshotStack *MultiTxSnapshotStack @@ -719,6 +717,7 
@@ func (s *StateDB) Copy() *StateDB { journal: newJournal(), hasher: crypto.NewKeccakState(), } + // Initialize new multi-transaction snapshot stack for the copied state state.multiTxSnapshotStack = NewMultiTxSnapshotStack(state) // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { @@ -851,9 +850,7 @@ func (s *StateDB) GetRefund() uint64 { // into the tries just yet. Only IntermediateRoot or Commit will do that. func (s *StateDB) Finalise(deleteEmptyObjects bool) { s.multiTxSnapshotStack.UpdateFromJournal(s.journal) - //if multiSnap := s.multiTxSnapshot; multiSnap != nil { - // multiSnap.updateFromJournal(s.journal) - //} + addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) for addr := range s.journal.dirties { obj, exist := s.stateObjects[addr] @@ -868,9 +865,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { } if obj.suicided || (deleteEmptyObjects && obj.empty()) { s.multiTxSnapshotStack.UpdateObjectDeleted(obj.address, obj.deleted) - //if multiSnap := s.multiTxSnapshot; multiSnap != nil { - // multiSnap.updateObjectDeleted(obj.address, obj.deleted) - //} + //s.multiTxSnapshotStack.UpdateObjectDeleted(obj.address, obj.deleted) obj.deleted = true @@ -895,11 +890,6 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { _, wasDirty := s.stateObjectsDirty[addr] s.multiTxSnapshotStack.UpdatePendingStatus(addr, wasPending, wasDirty) } - //if multiSnap := s.multiTxSnapshot; multiSnap != nil { - // _, wasPending := s.stateObjectsPending[addr] - // _, wasDirty := s.stateObjectsDirty[addr] - //multiSnap.updatePendingStatus(addr, wasPending, wasDirty) - //} s.stateObjectsPending[addr] = struct{}{} s.stateObjectsDirty[addr] = struct{}{} @@ -922,10 +912,9 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Finalise all the dirty storage states and write them into the tries s.Finalise(deleteEmptyObjects) + // Intermediate root writes updates to the trie, which will cause + // in memory multi-transaction snapshot to be incompatible with the committed state, so we invalidate. s.multiTxSnapshotStack.Invalidate() - //if s.multiTxSnapshot != nil { - // s.multiTxSnapshot.invalid = true - //} // If there was a trie prefetcher operating, it gets aborted and irrevocably // modified after we start retrieving tries. 
Remove it from the statedb after @@ -1223,41 +1212,12 @@ func (s *StateDB) NewMultiTxSnapshot() error { return nil } -// MultiTxSnapshot creates new checkpoint for multi txs reverts -//func (s *StateDB) MultiTxSnapshot() error { -// if s.multiTxSnapshot != nil { -// return errors.New("multi tx snapshot already exists") -// } -// s.multiTxSnapshot = NewMultiTxSnapshot() -// return nil -//} - func (s *StateDB) MultiTxSnapshotRevert() error { _, err := s.multiTxSnapshotStack.Revert() return err } -//func (s *StateDB) MultiTxSnapshotRevert() error { -// if s.multiTxSnapshot == nil { -// return errors.New("multi tx snapshot does not exist") -// } -// if s.multiTxSnapshot.invalid { -// return errors.New("multi tx snapshot is invalid") -// } -// s.multiTxSnapshot.revertState(s) -// s.multiTxSnapshot = nil -// return nil -//} - -func (s *StateDB) MultiTxSnapshotDiscard() error { +func (s *StateDB) MultiTxSnapshotCommit() error { _, err := s.multiTxSnapshotStack.Commit() return err } - -//func (s *StateDB) MultiTxSnapshotDiscard() error { -// if s.multiTxSnapshot == nil { -// return errors.New("multi tx snapshot does not exist") -// } -// s.multiTxSnapshot = nil -// return nil -//} diff --git a/miner/env_changes.go b/miner/env_changes.go index 2ca2e3f376..6fc7cdc747 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -27,9 +27,6 @@ func newEnvChanges(env *environment) (*envChanges, error) { if err := env.state.NewMultiTxSnapshot(); err != nil { return nil, err } - //if err := env.state.MultiTxSnapshot(); err != nil { - // return nil, err - //} return &envChanges{ env: env, @@ -411,7 +408,7 @@ func (c *envChanges) rollback( } func (c *envChanges) apply() error { - if err := c.env.state.MultiTxSnapshotDiscard(); err != nil { + if err := c.env.state.MultiTxSnapshotCommit(); err != nil { return err } From a16791d58ff4183a8eb65c3fffba68702bb3eb55 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Fri, 4 Aug 2023 15:41:46 -0500 Subject: [PATCH 25/46] Fix linter error --- core/state/multi_tx_snapshot.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state/multi_tx_snapshot.go b/core/state/multi_tx_snapshot.go index abce576519..a3728b69b9 100644 --- a/core/state/multi_tx_snapshot.go +++ b/core/state/multi_tx_snapshot.go @@ -83,7 +83,7 @@ func (s *MultiTxSnapshot) Equal(other *MultiTxSnapshot) bool { if visited[address] { continue } - + otherObject, exist := other.prevObjects[address] if !exist { return false From d6d5caeeaaca3b4a76f57f670265273729660050 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Sun, 6 Aug 2023 19:34:10 -0500 Subject: [PATCH 26/46] Add refund support to efficient revert so state returns to correct refund value on discard --- core/state/multi_tx_snapshot.go | 16 ++++-- core/state/multi_tx_snapshot_test.go | 75 ++++++++++++++++++++++------ 2 files changed, 73 insertions(+), 18 deletions(-) diff --git a/core/state/multi_tx_snapshot.go b/core/state/multi_tx_snapshot.go index a3728b69b9..0900ecc33c 100644 --- a/core/state/multi_tx_snapshot.go +++ b/core/state/multi_tx_snapshot.go @@ -28,16 +28,18 @@ type MultiTxSnapshot struct { accountNotPending map[common.Address]struct{} accountNotDirty map[common.Address]struct{} + + previousRefund uint64 // TODO: snapdestructs, snapaccount storage } // NewMultiTxSnapshot creates a new MultiTxSnapshot -func NewMultiTxSnapshot() *MultiTxSnapshot { - multiTxSnapshot := newMultiTxSnapshot() +func NewMultiTxSnapshot(previousRefund uint64) *MultiTxSnapshot { + multiTxSnapshot := newMultiTxSnapshot(previousRefund) return 
&multiTxSnapshot } -func newMultiTxSnapshot() MultiTxSnapshot { +func newMultiTxSnapshot(previousRefund uint64) MultiTxSnapshot { return MultiTxSnapshot{ numLogsAdded: make(map[common.Hash]int), prevObjects: make(map[common.Address]*stateObject), @@ -50,6 +52,7 @@ func newMultiTxSnapshot() MultiTxSnapshot { accountDeleted: make(map[common.Address]bool), accountNotPending: make(map[common.Address]struct{}), accountNotDirty: make(map[common.Address]struct{}), + previousRefund: previousRefund, } } @@ -361,6 +364,11 @@ func (s *MultiTxSnapshot) Merge(other *MultiTxSnapshot) error { // revertState reverts the state to the snapshot. func (s *MultiTxSnapshot) revertState(st *StateDB) { + // restore previous refund + if st.refund != s.previousRefund { + st.refund = s.previousRefund + } + // remove all the logs added for txhash, numLogs := range s.numLogsAdded { lens := len(st.logs[txhash]) @@ -455,7 +463,7 @@ func (stack *MultiTxSnapshotStack) NewSnapshot() (*MultiTxSnapshot, error) { return nil, errors.New("failed to create new multi-transaction snapshot - invalid snapshot found at head") } - snap := newMultiTxSnapshot() + snap := newMultiTxSnapshot(stack.state.refund) stack.snapshots = append(stack.snapshots, snap) return &snap, nil } diff --git a/core/state/multi_tx_snapshot_test.go b/core/state/multi_tx_snapshot_test.go index 50de20280d..607b458924 100644 --- a/core/state/multi_tx_snapshot_test.go +++ b/core/state/multi_tx_snapshot_test.go @@ -258,13 +258,21 @@ func prepareInitialState(s *StateDB) { afterHook(addrs[i], s) } } + s.Finalise(true) + + // NOTE(wazzymandias): + // We want to test refund is properly reverted for snapshots - state.StateDB clears refund on Finalise + // so refund is set here to emulate state with non-zero value. + s.AddRefund(rng.Uint64()) } func testMultiTxSnapshot(t *testing.T, actions func(s *StateDB)) { s := newStateTest() prepareInitialState(s.state) + previousRefund := s.state.GetRefund() + var obsStates []*observableAccountState for _, account := range addrs { obsStates = append(obsStates, getObservableAccountState(s.state, account, keys)) @@ -300,6 +308,10 @@ func testMultiTxSnapshot(t *testing.T, actions func(s *StateDB)) { } } + if s.state.GetRefund() != previousRefund { + t.Error("refund mismatch", "got", s.state.GetRefund(), "expected", previousRefund) + } + if len(s.state.stateObjectsPending) != len(pendingAddressesBefore) { t.Error("pending state objects count mismatch", "got", len(s.state.stateObjectsPending), "expected", len(pendingAddressesBefore)) } @@ -339,6 +351,18 @@ func TestMultiTxSnapshotAccountChangesSimple(t *testing.T) { }) } +func TestMultiTxSnapshotRefund(t *testing.T) { + testMultiTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + s.SetNonce(addr, 78) + s.SetBalance(addr, big.NewInt(79)) + s.SetCode(addr, []byte{0x80}) + } + s.Finalise(true) + s.AddRefund(1000) + }) +} + func TestMultiTxSnapshotAccountChangesMultiTx(t *testing.T) { testMultiTxSnapshot(t, func(s *StateDB) { for _, addr := range addrs { @@ -419,7 +443,7 @@ func TestMultiTxSnapshotStateChanges(t *testing.T) { }) } -func testStackBasic(t *testing.T) { +func TestStackBasic(t *testing.T) { for i := 0; i < 10; i++ { testMultiTxSnapshot(t, func(s *StateDB) { // when test starts, actions are performed after new snapshot is created @@ -475,7 +499,41 @@ func testStackBasic(t *testing.T) { } } -func testStackSelfDestruct(t *testing.T) { +func TestStackRefund(t *testing.T) { + testMultiTxSnapshot(t, func(s *StateDB) { + const counter = 10 + + s.AddRefund(500) + 
previousRefunds := make([]uint64, 0, counter) + previousRefunds = append(previousRefunds, s.GetRefund()) + + for i := 0; i < counter; i++ { + previousRefunds = append(previousRefunds, s.GetRefund()) + if err := s.NewMultiTxSnapshot(); err != nil { + t.Errorf("NewMultiTxSnapshot failed: %v", err) + t.FailNow() + } + s.Finalise(true) + s.AddRefund(1000) + } + + for i := 0; i < counter; i++ { + if err := s.MultiTxSnapshotRevert(); err != nil { + t.Errorf("MultiTxSnapshotRevert failed: %v", err) + t.FailNow() + } + actualRefund := s.GetRefund() + expectedRefund := previousRefunds[len(previousRefunds)-1] + if actualRefund != expectedRefund { + t.Errorf("expected refund to be %d, got %d", expectedRefund, actualRefund) + t.FailNow() + } + previousRefunds = previousRefunds[:len(previousRefunds)-1] + } + }) +} + +func TestStackSelfDestruct(t *testing.T) { testMultiTxSnapshot(t, func(s *StateDB) { if err := s.NewMultiTxSnapshot(); err != nil { t.Errorf("NewMultiTxSnapshot failed: %v", err) @@ -515,7 +573,7 @@ func testStackSelfDestruct(t *testing.T) { }) } -func testStackAgainstSingleSnap(t *testing.T) { +func TestStackAgainstSingleSnap(t *testing.T) { // we generate a random seed ten times to fuzz test multiple stack snapshots against single layer snapshot for i := 0; i < 10; i++ { testMultiTxSnapshot(t, func(s *StateDB) { @@ -614,17 +672,6 @@ func testStackAgainstSingleSnap(t *testing.T) { } } -func TestMultiTxSnapshotStack(t *testing.T) { - // test state changes are valid after merging snapshots - testStackBasic(t) - - // test self-destruct - testStackSelfDestruct(t) - - // test against baseline single snapshot - testStackAgainstSingleSnap(t) -} - func CompareAndPrintSnapshotMismatches(t *testing.T, target, other *MultiTxSnapshot) { var out bytes.Buffer if target.Equal(other) { From 52ddc82d3c6e4042f4b15f7fd85bd34dbe62e5e9 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Mon, 7 Aug 2023 10:07:06 -0500 Subject: [PATCH 27/46] Revert some refactor changes Address PR feedback, separate block build function initializing to de-dupe logic, split types into separate definitions Add comment Add panic if copy is performed with non-empty stack of snapshots Update env changes method name Update comments Fix unit test and update log to trace Remove refund in case it causes bad state --- cmd/utils/flags.go | 1 - core/state/access_list.go | 8 +- core/state/multi_tx_snapshot.go | 21 ++-- core/state/multi_tx_snapshot_test.go | 44 +------- core/state/statedb.go | 27 ++--- miner/algo_common.go | 145 ++++++++++++++++++--------- miner/algo_greedy.go | 44 ++------ miner/algo_greedy_buckets.go | 34 +------ miner/env_changes.go | 60 ++++++++--- miner/env_changes_test.go | 28 ++++-- miner/environment_diff.go | 45 +++++++-- 11 files changed, 236 insertions(+), 221 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 9219617c9b..42c7b7ac1d 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1972,7 +1972,6 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { } } - cfg.DiscardRevertibleTxOnErr = ctx.Bool(BuilderDiscardRevertibleTxOnErr.Name) cfg.EnableMultiTransactionSnapshot = ctx.Bool(BuilderEnableMultiTxSnapshot.Name) cfg.PriceCutoffPercent = ctx.Int(BuilderPriceCutoffPercentFlag.Name) } diff --git a/core/state/access_list.go b/core/state/access_list.go index 0ff5c3db6e..4194691345 100644 --- a/core/state/access_list.go +++ b/core/state/access_list.go @@ -55,13 +55,13 @@ func newAccessList() *accessList { } // Copy creates an independent copy of an accessList. 
-func (al *accessList) Copy() *accessList { +func (a *accessList) Copy() *accessList { cp := newAccessList() - for k, v := range al.addresses { + for k, v := range a.addresses { cp.addresses[k] = v } - cp.slots = make([]map[common.Hash]struct{}, len(al.slots)) - for i, slotMap := range al.slots { + cp.slots = make([]map[common.Hash]struct{}, len(a.slots)) + for i, slotMap := range a.slots { newSlotmap := make(map[common.Hash]struct{}, len(slotMap)) for k := range slotMap { newSlotmap[k] = struct{}{} diff --git a/core/state/multi_tx_snapshot.go b/core/state/multi_tx_snapshot.go index 0900ecc33c..022cb76220 100644 --- a/core/state/multi_tx_snapshot.go +++ b/core/state/multi_tx_snapshot.go @@ -29,17 +29,16 @@ type MultiTxSnapshot struct { accountNotPending map[common.Address]struct{} accountNotDirty map[common.Address]struct{} - previousRefund uint64 // TODO: snapdestructs, snapaccount storage } // NewMultiTxSnapshot creates a new MultiTxSnapshot -func NewMultiTxSnapshot(previousRefund uint64) *MultiTxSnapshot { - multiTxSnapshot := newMultiTxSnapshot(previousRefund) +func NewMultiTxSnapshot() *MultiTxSnapshot { + multiTxSnapshot := newMultiTxSnapshot() return &multiTxSnapshot } -func newMultiTxSnapshot(previousRefund uint64) MultiTxSnapshot { +func newMultiTxSnapshot() MultiTxSnapshot { return MultiTxSnapshot{ numLogsAdded: make(map[common.Hash]int), prevObjects: make(map[common.Address]*stateObject), @@ -52,7 +51,6 @@ func newMultiTxSnapshot(previousRefund uint64) MultiTxSnapshot { accountDeleted: make(map[common.Address]bool), accountNotPending: make(map[common.Address]struct{}), accountNotDirty: make(map[common.Address]struct{}), - previousRefund: previousRefund, } } @@ -135,7 +133,8 @@ func (s *MultiTxSnapshot) updateFromJournal(journal *journal) { } } -// objectChanged returns whether the object was changed (in the set of prevObjects). +// objectChanged returns whether the object was changed (in the set of prevObjects), which can happen +// because of self-destructs and deployments. func (s *MultiTxSnapshot) objectChanged(address common.Address) bool { _, ok := s.prevObjects[address] return ok @@ -364,11 +363,6 @@ func (s *MultiTxSnapshot) Merge(other *MultiTxSnapshot) error { // revertState reverts the state to the snapshot. func (s *MultiTxSnapshot) revertState(st *StateDB) { - // restore previous refund - if st.refund != s.previousRefund { - st.refund = s.previousRefund - } - // remove all the logs added for txhash, numLogs := range s.numLogsAdded { lens := len(st.logs[txhash]) @@ -463,7 +457,7 @@ func (stack *MultiTxSnapshotStack) NewSnapshot() (*MultiTxSnapshot, error) { return nil, errors.New("failed to create new multi-transaction snapshot - invalid snapshot found at head") } - snap := newMultiTxSnapshot(stack.state.refund) + snap := newMultiTxSnapshot() stack.snapshots = append(stack.snapshots, snap) return &snap, nil } @@ -549,8 +543,6 @@ func (stack *MultiTxSnapshotStack) Size() int { // Invalidate invalidates the latest snapshot. This is used when state changes are committed to trie. 
func (stack *MultiTxSnapshotStack) Invalidate() { - // TODO: if latest snapshot is invalid, then all previous snapshots - // would also be invalidated, need to update logic to reflect that size := len(stack.snapshots) if size == 0 { return @@ -560,7 +552,6 @@ func (stack *MultiTxSnapshotStack) Invalidate() { head.invalid = true stack.snapshots = stack.snapshots[:0] stack.snapshots = append(stack.snapshots, head) - //stack.snapshots[size-1].invalid = true } // UpdatePendingStatus updates the pending status for an address. diff --git a/core/state/multi_tx_snapshot_test.go b/core/state/multi_tx_snapshot_test.go index 607b458924..77c4168334 100644 --- a/core/state/multi_tx_snapshot_test.go +++ b/core/state/multi_tx_snapshot_test.go @@ -260,11 +260,6 @@ func prepareInitialState(s *StateDB) { } s.Finalise(true) - - // NOTE(wazzymandias): - // We want to test refund is properly reverted for snapshots - state.StateDB clears refund on Finalise - // so refund is set here to emulate state with non-zero value. - s.AddRefund(rng.Uint64()) } func testMultiTxSnapshot(t *testing.T, actions func(s *StateDB)) { @@ -359,7 +354,6 @@ func TestMultiTxSnapshotRefund(t *testing.T) { s.SetCode(addr, []byte{0x80}) } s.Finalise(true) - s.AddRefund(1000) }) } @@ -499,40 +493,6 @@ func TestStackBasic(t *testing.T) { } } -func TestStackRefund(t *testing.T) { - testMultiTxSnapshot(t, func(s *StateDB) { - const counter = 10 - - s.AddRefund(500) - previousRefunds := make([]uint64, 0, counter) - previousRefunds = append(previousRefunds, s.GetRefund()) - - for i := 0; i < counter; i++ { - previousRefunds = append(previousRefunds, s.GetRefund()) - if err := s.NewMultiTxSnapshot(); err != nil { - t.Errorf("NewMultiTxSnapshot failed: %v", err) - t.FailNow() - } - s.Finalise(true) - s.AddRefund(1000) - } - - for i := 0; i < counter; i++ { - if err := s.MultiTxSnapshotRevert(); err != nil { - t.Errorf("MultiTxSnapshotRevert failed: %v", err) - t.FailNow() - } - actualRefund := s.GetRefund() - expectedRefund := previousRefunds[len(previousRefunds)-1] - if actualRefund != expectedRefund { - t.Errorf("expected refund to be %d, got %d", expectedRefund, actualRefund) - t.FailNow() - } - previousRefunds = previousRefunds[:len(previousRefunds)-1] - } - }) -} - func TestStackSelfDestruct(t *testing.T) { testMultiTxSnapshot(t, func(s *StateDB) { if err := s.NewMultiTxSnapshot(); err != nil { @@ -577,6 +537,10 @@ func TestStackAgainstSingleSnap(t *testing.T) { // we generate a random seed ten times to fuzz test multiple stack snapshots against single layer snapshot for i := 0; i < 10; i++ { testMultiTxSnapshot(t, func(s *StateDB) { + // Need to drop initial snapshot since copy requires empty snapshot stack + if err := s.MultiTxSnapshotRevert(); err != nil { + t.Fatalf("error reverting snapshot: %v", err) + } original := s.Copy() baselineStateDB := s.Copy() diff --git a/core/state/statedb.go b/core/state/statedb.go index a062bd3793..040f0f6ede 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -718,6 +718,11 @@ func (s *StateDB) Copy() *StateDB { hasher: crypto.NewKeccakState(), } // Initialize new multi-transaction snapshot stack for the copied state + // NOTE(wazzymandias): We avoid copying the snapshot stack from the original state + // because it may contain snapshots that are not valid for the copied state. 
+ if s.multiTxSnapshotStack.Size() > 0 { + panic("cannot copy state with active multi-transaction snapshot stack") + } state.multiTxSnapshotStack = NewMultiTxSnapshotStack(state) // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { @@ -865,7 +870,6 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { } if obj.suicided || (deleteEmptyObjects && obj.empty()) { s.multiTxSnapshotStack.UpdateObjectDeleted(obj.address, obj.deleted) - //s.multiTxSnapshotStack.UpdateObjectDeleted(obj.address, obj.deleted) obj.deleted = true @@ -1204,20 +1208,17 @@ func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common. return ret } -func (s *StateDB) NewMultiTxSnapshot() error { - _, err := s.multiTxSnapshotStack.NewSnapshot() - if err != nil { - return err - } - return nil +func (s *StateDB) NewMultiTxSnapshot() (err error) { + _, err = s.multiTxSnapshotStack.NewSnapshot() + return } -func (s *StateDB) MultiTxSnapshotRevert() error { - _, err := s.multiTxSnapshotStack.Revert() - return err +func (s *StateDB) MultiTxSnapshotRevert() (err error) { + _, err = s.multiTxSnapshotStack.Revert() + return } -func (s *StateDB) MultiTxSnapshotCommit() error { - _, err := s.multiTxSnapshotStack.Commit() - return err +func (s *StateDB) MultiTxSnapshotCommit() (err error) { + _, err = s.multiTxSnapshotStack.Commit() + return } diff --git a/miner/algo_common.go b/miner/algo_common.go index db4dbcbc51..57aab284ab 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -35,7 +35,7 @@ var ( defaultAlgorithmConfig = algorithmConfig{ DropRevertibleTxOnErr: false, EnforceProfit: false, - ExpectedProfit: common.Big0, + ExpectedProfit: nil, ProfitThresholdPercent: defaultProfitThresholdPercent, PriceCutoffPercent: defaultPriceCutoffPercent, EnableMultiTxSnap: false, @@ -66,35 +66,50 @@ func (e *lowProfitError) Error() string { ) } -type ( - algorithmConfig struct { - // DropRevertibleTxOnErr is used when a revertible transaction has error on commit, and we wish to discard - // the transaction and continue processing the rest of a bundle or sbundle. - // Revertible transactions are specified as hashes that can revert in a bundle or sbundle. - DropRevertibleTxOnErr bool - // EnforceProfit is true if we want to enforce a minimum profit threshold - // for committing a transaction based on ProfitThresholdPercent - EnforceProfit bool - // ExpectedProfit should be set on a per-transaction basis when profit is enforced - ExpectedProfit *big.Int - // ProfitThresholdPercent is the minimum profit threshold for committing a transaction - ProfitThresholdPercent int // 0-100, e.g. 70 means 70% - // PriceCutoffPercent is the minimum effective gas price threshold used for bucketing transactions by price. - // For example if the top transaction in a list has an effective gas price of 1000 wei and PriceCutoffPercent - // is 10 (i.e. 10%), then the minimum effective gas price included in the same bucket as the top transaction - // is (1000 * 10%) = 100 wei. - PriceCutoffPercent int - // EnableMultiTxSnap is true if we want to use multi-transaction snapshot for committing transactions, - // which reduce state copies when reverting failed bundles (note: experimental) - EnableMultiTxSnap bool - } +type algorithmConfig struct { + // DropRevertibleTxOnErr is used when a revertible transaction has error on commit, and we wish to discard + // the transaction and continue processing the rest of a bundle or sbundle. 
+ // Revertible transactions are specified as hashes that can revert in a bundle or sbundle. + DropRevertibleTxOnErr bool + // EnforceProfit is true if we want to enforce a minimum profit threshold + // for committing a transaction based on ProfitThresholdPercent + EnforceProfit bool + // ExpectedProfit should be set on a per-transaction basis when profit is enforced + ExpectedProfit *big.Int + // ProfitThresholdPercent is the minimum profit threshold for committing a transaction + ProfitThresholdPercent int // 0-100, e.g. 70 means 70% + // PriceCutoffPercent is the minimum effective gas price threshold used for bucketing transactions by price. + // For example if the top transaction in a list has an effective gas price of 1000 wei and PriceCutoffPercent + // is 10 (i.e. 10%), then the minimum effective gas price included in the same bucket as the top transaction + // is (1000 * 10%) = 100 wei. + PriceCutoffPercent int + // EnableMultiTxSnap is true if we want to use multi-transaction snapshot for committing transactions, + // which reduce state copies when reverting failed bundles (note: experimental) + EnableMultiTxSnap bool +} - chainData struct { - chainConfig *params.ChainConfig - chain *core.BlockChain - blacklist map[common.Address]struct{} - } +type chainData struct { + chainConfig *params.ChainConfig + chain *core.BlockChain + blacklist map[common.Address]struct{} +} +// PayoutTransactionParams holds parameters for committing a payout transaction, used in commitPayoutTx +type PayoutTransactionParams struct { + Amount *big.Int + BaseFee *big.Int + ChainData chainData + Gas uint64 + CommitFn CommitTxFunc + Receiver common.Address + Sender common.Address + SenderBalance *big.Int + SenderNonce uint64 + Signer types.Signer + PrivateKey *ecdsa.PrivateKey +} + +type ( // BuildBlockFunc is the function signature for building a block BuildBlockFunc func( simBundles []types.SimulatedBundle, @@ -103,22 +118,57 @@ type ( // CommitTxFunc is the function signature for committing a transaction CommitTxFunc func(*types.Transaction, chainData) (*types.Receipt, int, error) +) - // PayoutTransactionParams holds parameters for committing a payout transaction, used in commitPayoutTx - PayoutTransactionParams struct { - Amount *big.Int - BaseFee *big.Int - ChainData chainData - Gas uint64 - CommitFn CommitTxFunc - Receiver common.Address - Sender common.Address - SenderBalance *big.Int - SenderNonce uint64 - Signer types.Signer - PrivateKey *ecdsa.PrivateKey +func NewBuildBlockFunc( + inputEnvironment *environment, + builderKey *ecdsa.PrivateKey, + chData chainData, + algoConf algorithmConfig, + greedyBuckets *greedyBucketsBuilder, + greedy *greedyBuilder, +) BuildBlockFunc { + if algoConf.EnableMultiTxSnap { + return func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + orders := types.NewTransactionsByPriceAndNonce(inputEnvironment.signer, transactions, + simBundles, simSBundles, inputEnvironment.header.BaseFee) + + usedBundles, usedSbundles, err := BuildMultiTxSnapBlock( + inputEnvironment, + builderKey, + chData, + algoConf, + orders, + ) + if err != nil { + log.Trace("Error(s) building multi-tx snapshot block", "err", err) + } + return inputEnvironment, usedBundles, usedSbundles + } + } else if builder := greedyBuckets; builder != nil { + return func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions 
map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + orders := types.NewTransactionsByPriceAndNonce(inputEnvironment.signer, transactions, + simBundles, simSBundles, inputEnvironment.header.BaseFee) + + envDiff := newEnvironmentDiff(inputEnvironment.copy()) + usedBundles, usedSbundles := builder.mergeOrdersIntoEnvDiff(envDiff, orders) + envDiff.applyToBaseEnv() + return envDiff.baseEnvironment, usedBundles, usedSbundles + } + } else if builder := greedy; builder != nil { + return func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + orders := types.NewTransactionsByPriceAndNonce(inputEnvironment.signer, transactions, + simBundles, simSBundles, inputEnvironment.header.BaseFee) + + envDiff := newEnvironmentDiff(inputEnvironment.copy()) + usedBundles, usedSbundles := builder.mergeOrdersIntoEnvDiff(envDiff, orders) + envDiff.applyToBaseEnv() + return envDiff.baseEnvironment, usedBundles, usedSbundles + } + } else { + panic("invalid call to build block function") } -) +} func checkInterrupt(i *int32) bool { return i != nil && atomic.LoadInt32(i) != commitInterruptNone @@ -307,6 +357,9 @@ func BuildMultiTxSnapBlock( chData chainData, algoConf algorithmConfig, orders *types.TransactionsByPriceAndNonce) ([]types.SimulatedBundle, []types.UsedSBundle, error) { + // NOTE(wazzymandias): BuildMultiTxSnapBlock uses envChanges which is different from envDiff struct. + // Eventually the structs should be consolidated but for now they represent the difference between using state + // copies for building blocks (envDiff) versus using MultiTxSnapshot (envChanges). var ( usedBundles []types.SimulatedBundle usedSbundles []types.UsedSBundle @@ -342,7 +395,7 @@ func BuildMultiTxSnapBlock( orderFailed = true } } else if bundle := order.Bundle(); bundle != nil { - err = changes.commitBundle(bundle, chData) + err = changes.commitBundle(bundle, chData, algoConf) orders.Pop() if err != nil { log.Trace("Could not apply bundle", "bundle", bundle.OriginalBundle.Hash, "err", err) @@ -371,9 +424,9 @@ func BuildMultiTxSnapBlock( } if orderFailed { - if err = changes.revert(); err != nil { - log.Error("Failed to revert changes with multi-transaction snapshot", "err", err) - buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to revert changes: %w", err)) + if err = changes.discard(); err != nil { + log.Error("Failed to discard changes with multi-transaction snapshot", "err", err) + buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to discard changes: %w", err)) } } else { if err = changes.apply(); err != nil { diff --git a/miner/algo_greedy.go b/miner/algo_greedy.go index 7650563ff1..4a712f03c3 100644 --- a/miner/algo_greedy.go +++ b/miner/algo_greedy.go @@ -40,37 +40,15 @@ func newGreedyBuilder( algoConf: *algoConf, } // Initialize block builder function - var buildBlockFunc BuildBlockFunc - if algoConf.EnableMultiTxSnap { - buildBlockFunc = func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - orders := types.NewTransactionsByPriceAndNonce(builder.inputEnvironment.signer, transactions, - simBundles, simSBundles, builder.inputEnvironment.header.BaseFee) - - usedBundles, usedSbundles, err := BuildMultiTxSnapBlock( - builder.inputEnvironment, - builder.builderKey, - 
builder.chainData, - builder.algoConf, - orders, - ) - if err != nil { - log.Debug("Error(s) building multi-tx snapshot block", "err", err) - } - return builder.inputEnvironment, usedBundles, usedSbundles - } - } else { - buildBlockFunc = func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - orders := types.NewTransactionsByPriceAndNonce(builder.inputEnvironment.signer, transactions, - simBundles, simSBundles, builder.inputEnvironment.header.BaseFee) - - envDiff := newEnvironmentDiff(builder.inputEnvironment.copy()) - usedBundles, usedSbundles := builder.mergeOrdersIntoEnvDiff(envDiff, orders) - envDiff.applyToBaseEnv() - return envDiff.baseEnvironment, usedBundles, usedSbundles - } - } + builder.buildBlockFunc = NewBuildBlockFunc( + builder.inputEnvironment, + builder.builderKey, + builder.chainData, + builder.algoConf, + nil, + builder, + ) - builder.buildBlockFunc = buildBlockFunc return builder, nil } @@ -137,9 +115,5 @@ func (b *greedyBuilder) mergeOrdersIntoEnvDiff( } func (b *greedyBuilder) buildBlock(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - orders := types.NewTransactionsByPriceAndNonce(b.inputEnvironment.signer, transactions, simBundles, simSBundles, b.inputEnvironment.header.BaseFee) - envDiff := newEnvironmentDiff(b.inputEnvironment.copy()) - usedBundles, usedSbundles := b.mergeOrdersIntoEnvDiff(envDiff, orders) - envDiff.applyToBaseEnv() - return envDiff.baseEnvironment, usedBundles, usedSbundles + return b.buildBlockFunc(simBundles, simSBundles, transactions) } diff --git a/miner/algo_greedy_buckets.go b/miner/algo_greedy_buckets.go index 3bec22d3b3..63fad2989a 100644 --- a/miner/algo_greedy_buckets.go +++ b/miner/algo_greedy_buckets.go @@ -46,39 +46,7 @@ func newGreedyBucketsBuilder( } // Initialize block builder function - var buildBlockFunc BuildBlockFunc - if algoConf.EnableMultiTxSnap { - buildBlockFunc = func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, - transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - orders := types.NewTransactionsByPriceAndNonce(builder.inputEnvironment.signer, transactions, - simBundles, simSBundles, builder.inputEnvironment.header.BaseFee) - - usedBundles, usedSbundles, err := BuildMultiTxSnapBlock( - builder.inputEnvironment, - builder.builderKey, - builder.chainData, - builder.algoConf, - orders, - ) - if err != nil { - log.Trace("Error(s) building multi-tx snapshot block", "err", err) - } - return builder.inputEnvironment, usedBundles, usedSbundles - } - } else { - buildBlockFunc = func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, - transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - orders := types.NewTransactionsByPriceAndNonce(builder.inputEnvironment.signer, transactions, - simBundles, simSBundles, builder.inputEnvironment.header.BaseFee) - - envDiff := newEnvironmentDiff(builder.inputEnvironment.copy()) - usedBundles, usedSbundles := builder.mergeOrdersIntoEnvDiff(envDiff, orders) - envDiff.applyToBaseEnv() - return envDiff.baseEnvironment, usedBundles, usedSbundles - } - } - - builder.buildBlockFunc = buildBlockFunc + builder.buildBlockFunc = NewBuildBlockFunc(builder.inputEnvironment, 
builder.builderKey, builder.chainData, builder.algoConf, builder, nil) return builder, nil } diff --git a/miner/env_changes.go b/miner/env_changes.go index 6fc7cdc747..195205e750 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -13,7 +13,7 @@ import ( "github.com/ethereum/go-ethereum/log" ) -// envChanges is a helper struct to apply and revert changes to the environment +// envChanges is a helper struct to apply and discard changes to the environment type envChanges struct { env *environment gasPool *core.GasPool @@ -65,7 +65,7 @@ func (c *envChanges) commitTx(tx *types.Transaction, chData chainData) (*types.R profitBefore = new(big.Int).Set(c.profit) ) signer := c.env.signer - sender, err := types.Sender(signer, tx) + from, err := types.Sender(signer, tx) if err != nil { return nil, popTx, err } @@ -75,7 +75,7 @@ func (c *envChanges) commitTx(tx *types.Transaction, chData chainData) (*types.R return nil, shiftTx, err } - if _, in := chData.blacklist[sender]; in { + if _, in := chData.blacklist[from]; in { return nil, popTx, errors.New("blacklist violation, tx.sender") } @@ -100,25 +100,21 @@ func (c *envChanges) commitTx(tx *types.Transaction, chData chainData) (*types.R switch { case errors.Is(err, core.ErrGasLimitReached): // Pop the current out-of-gas transaction without shifting in the next from the account - from, _ := types.Sender(signer, tx) log.Trace("Gas limit exceeded for current block", "sender", from) return receipt, popTx, err case errors.Is(err, core.ErrNonceTooLow): // New head notification data race between the transaction pool and miner, shift - from, _ := types.Sender(signer, tx) log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) return receipt, shiftTx, err case errors.Is(err, core.ErrNonceTooHigh): // Reorg notification data race between the transaction pool and miner, skip account = - from, _ := types.Sender(signer, tx) log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) return receipt, popTx, err case errors.Is(err, core.ErrTxTypeNotSupported): // Pop the unsupported transaction without shifting in the next from the account - from, _ := types.Sender(signer, tx) log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) return receipt, popTx, err @@ -144,7 +140,7 @@ func (c *envChanges) commitTx(tx *types.Transaction, chData chainData) (*types.R return receipt, shiftTx, nil } -func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainData) error { +func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainData, algoConf algorithmConfig) error { var ( profitBefore = new(big.Int).Set(c.profit) coinbaseBefore = new(big.Int).Set(c.env.state.GetBalance(c.env.coinbase)) @@ -158,6 +154,7 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat ) for _, tx := range bundle.OriginalBundle.Txs { + txHash := tx.Hash() // TODO: Checks for base fee and dynamic fee txs should be moved to the transaction pool, // similar to mev-share bundles. See SBundlesPool.validateTx() for reference. 
if hasBaseFee && tx.Type() == types.DynamicFeeTxType { @@ -180,14 +177,31 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat receipt, _, err := c.commitTx(tx, chData) if err != nil { - log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) + isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) + // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one + if algoConf.DropRevertibleTxOnErr && isRevertibleTx { + log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", + "tx", txHash, "err", err) + continue + } + log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) bundleErr = err break } - if receipt.Status != types.ReceiptStatusSuccessful && !bundle.OriginalBundle.RevertingHash(tx.Hash()) { - log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) - bundleErr = errors.New("bundle tx revert") + if receipt != nil { + if receipt.Status == types.ReceiptStatusFailed && !bundle.OriginalBundle.RevertingHash(txHash) { + // if transaction reverted and isn't specified as reverting hash, return error + log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) + bundleErr = errors.New("bundle tx revert") + } + } else { + // NOTE: The expectation is that a receipt is only nil if an error occurred. + // If there is no error but receipt is nil, there is likely a programming error. + bundleErr = errors.New("invalid receipt when no error occurred") + } + + if bundleErr != nil { break } } @@ -201,9 +215,14 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat bundleProfit = new(big.Int).Sub(c.env.state.GetBalance(c.env.coinbase), coinbaseBefore) gasUsed = c.usedGas - gasUsedBefore - effGP = new(big.Int).Div(bundleProfit, new(big.Int).SetUint64(gasUsed)) simEffGP = new(big.Int).Set(bundle.MevGasPrice) + effGP *big.Int ) + if gasUsed == 0 { + effGP = new(big.Int).SetUint64(0) + } else { + effGP = new(big.Int).Div(bundleProfit, new(big.Int).SetUint64(gasUsed)) + } // allow >-1% divergence effGP.Mul(effGP, common.Big100) @@ -219,6 +238,8 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat } func (c *envChanges) CommitSBundle(sbundle *types.SimSBundle, chData chainData, key *ecdsa.PrivateKey, algoConf algorithmConfig) error { + // TODO: Suggestion for future improvement: instead of checking if key is nil, panic. 
+ // Discussed with @Ruteri, see PR#90 for details: https://github.com/flashbots/builder/pull/90#discussion_r1285567550 if key == nil { return errNoPrivateKey } @@ -323,6 +344,13 @@ func (c *envChanges) commitSBundle(sbundle *types.SBundle, chData chainData, key if el.Tx != nil { receipt, _, err := c.commitTx(el.Tx, chData) if err != nil { + // if drop enabled, and revertible tx has error on commit, + // we skip the transaction and continue with next one + if algoConf.DropRevertibleTxOnErr && el.CanRevert { + log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", + "tx", el.Tx.Hash(), "err", err) + continue + } return err } if receipt.Status != types.ReceiptStatusSuccessful && !el.CanRevert { @@ -392,11 +420,13 @@ func (c *envChanges) commitSBundle(sbundle *types.SBundle, chData chainData, key return nil } -// revert reverts all changes to the environment - every commit operation must be followed by a revert or apply operation -func (c *envChanges) revert() error { +// discard reverts all changes to the environment - every commit operation must be followed by a discard or apply operation +func (c *envChanges) discard() error { return c.env.state.MultiTxSnapshotRevert() } +// rollback reverts all changes to the environment - whereas apply and discard update the state, rollback only updates the environment +// the intended use is to call rollback after a commit operation has failed func (c *envChanges) rollback( gasUsedBefore uint64, gasPoolBefore *core.GasPool, profitBefore *big.Int, txsBefore []*types.Transaction, receiptsBefore []*types.Receipt) { diff --git a/miner/env_changes_test.go b/miner/env_changes_test.go index f1225308bf..b7bbed4571 100644 --- a/miner/env_changes_test.go +++ b/miner/env_changes_test.go @@ -60,11 +60,9 @@ func TestTxCommitSnaps(t *testing.T) { func TestBundleCommitSnaps(t *testing.T) { statedb, chData, signers := genTestSetup() + algoConf := defaultAlgorithmConfig + algoConf.EnableMultiTxSnap = true env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) - changes, err := newEnvChanges(env) - if err != nil { - t.Fatal("can't create env changes", err) - } tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) @@ -79,7 +77,12 @@ func TestBundleCommitSnaps(t *testing.T) { t.Fatal("Failed to simulate bundle", err) } - err = changes.commitBundle(&simBundle, chData) + changes, err := newEnvChanges(env) + if err != nil { + t.Fatal("can't create env changes", err) + } + + err = changes.commitBundle(&simBundle, chData, algoConf) if err != nil { t.Fatal("Failed to commit bundle", err) } @@ -166,11 +169,9 @@ func TestCommitTxOverGasLimitSnaps(t *testing.T) { func TestErrorBundleCommitSnaps(t *testing.T) { statedb, chData, signers := genTestSetup() + algoConf := defaultAlgorithmConfig + algoConf.EnableMultiTxSnap = true env := newEnvironment(chData, statedb, signers.addresses[0], 21000*2, big.NewInt(1)) - changes, err := newEnvChanges(env) - if err != nil { - t.Fatal("can't create env changes", err) - } // This tx will be included before bundle so bundle will fail because of gas limit tx0 := signers.signTx(4, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) @@ -188,6 +189,11 @@ func TestErrorBundleCommitSnaps(t *testing.T) { t.Fatal("Failed to simulate bundle", err) } + changes, err := newEnvChanges(env) + if 
err != nil { + t.Fatal("can't create env changes", err) + } + _, _, err = changes.commitTx(tx0, chData) if err != nil { t.Fatal("Failed to commit tx0", err) @@ -198,7 +204,7 @@ func TestErrorBundleCommitSnaps(t *testing.T) { newProfitBefore := new(big.Int).Set(changes.profit) balanceBefore := changes.env.state.GetBalance(signers.addresses[2]) - err = changes.commitBundle(&simBundle, chData) + err = changes.commitBundle(&simBundle, chData, algoConf) if err == nil { t.Fatal("Committed failed bundle", err) } @@ -348,7 +354,7 @@ func TestBlacklistSnaps(t *testing.T) { t.Fatal("committed blacklisted transaction: trace") } - err = changes.revert() + err = changes.discard() if err != nil { t.Fatal("failed reverting changes", err) } diff --git a/miner/environment_diff.go b/miner/environment_diff.go index 73c1c78d69..238a47b774 100644 --- a/miner/environment_diff.go +++ b/miner/environment_diff.go @@ -130,6 +130,7 @@ func (envDiff *environmentDiff) commitBundle(bundle *types.SimulatedBundle, chDa var gasUsed uint64 for _, tx := range bundle.OriginalBundle.Txs { + txHash := tx.Hash() if tmpEnvDiff.header.BaseFee != nil && tx.Type() == types.DynamicFeeTxType { // Sanity check for extremely large numbers if tx.GasFeeCap().BitLen() > 256 { @@ -165,13 +166,27 @@ func (envDiff *environmentDiff) commitBundle(bundle *types.SimulatedBundle, chDa receipt, _, err := tmpEnvDiff.commitTx(tx, chData) if err != nil { - log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) + isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) + // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one + if algoConf.DropRevertibleTxOnErr && isRevertibleTx { + log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", + "tx", txHash, "err", err) + continue + } + log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) return err } - if receipt.Status != types.ReceiptStatusSuccessful && !bundle.OriginalBundle.RevertingHash(tx.Hash()) { - log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", tx.Hash(), "err", err) - return errors.New("bundle tx revert") + if receipt != nil { + if receipt.Status == types.ReceiptStatusFailed && !bundle.OriginalBundle.RevertingHash(txHash) { + // if transaction reverted and isn't specified as reverting hash, return error + log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) + return errors.New("bundle tx revert") + } + } else { + // NOTE: The expectation is that a receipt is only nil if an error occurred. + // If there is no error but receipt is nil, there is likely a programming error. 
+ return errors.New("invalid receipt when no error occurred") } gasUsed += receipt.GasUsed @@ -182,7 +197,12 @@ func (envDiff *environmentDiff) commitBundle(bundle *types.SimulatedBundle, chDa bundleProfit := coinbaseBalanceDelta - bundleActualEffGP := bundleProfit.Div(bundleProfit, big.NewInt(int64(gasUsed))) + var bundleActualEffGP *big.Int + if gasUsed == 0 { + bundleActualEffGP = big.NewInt(0) + } else { + bundleActualEffGP = bundleProfit.Div(bundleProfit, big.NewInt(int64(gasUsed))) + } bundleSimEffGP := new(big.Int).Set(bundle.MevGasPrice) // allow >-1% divergence @@ -237,6 +257,8 @@ func (envDiff *environmentDiff) commitPayoutTx(amount *big.Int, sender, receiver } func (envDiff *environmentDiff) commitSBundle(b *types.SimSBundle, chData chainData, interrupt *int32, key *ecdsa.PrivateKey, algoConf algorithmConfig) error { + // TODO: Suggestion for future improvement: instead of checking if key is nil, panic. + // Discussed with @Ruteri, see PR#90 for details: https://github.com/flashbots/builder/pull/90#discussion_r1285567550 if key == nil { return errNoPrivateKey } @@ -246,7 +268,7 @@ func (envDiff *environmentDiff) commitSBundle(b *types.SimSBundle, chData chainD coinbaseBefore := tmpEnvDiff.state.GetBalance(tmpEnvDiff.header.Coinbase) gasBefore := tmpEnvDiff.gasPool.Gas() - if err := tmpEnvDiff.commitSBundleInner(b.Bundle, chData, interrupt, key); err != nil { + if err := tmpEnvDiff.commitSBundleInner(b.Bundle, chData, interrupt, key, algoConf); err != nil { return err } @@ -297,7 +319,7 @@ func (envDiff *environmentDiff) commitSBundle(b *types.SimSBundle, chData chainD return nil } -func (envDiff *environmentDiff) commitSBundleInner(b *types.SBundle, chData chainData, interrupt *int32, key *ecdsa.PrivateKey) error { +func (envDiff *environmentDiff) commitSBundleInner(b *types.SBundle, chData chainData, interrupt *int32, key *ecdsa.PrivateKey, algoConf algorithmConfig) error { // check inclusion minBlock := b.Inclusion.BlockNumber maxBlock := b.Inclusion.MaxBlockNumber @@ -330,13 +352,20 @@ func (envDiff *environmentDiff) commitSBundleInner(b *types.SBundle, chData chai if el.Tx != nil { receipt, _, err := envDiff.commitTx(el.Tx, chData) if err != nil { + // if drop enabled, and revertible tx has error on commit, + // we skip the transaction and continue with next one + if algoConf.DropRevertibleTxOnErr && el.CanRevert { + log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", + "tx", el.Tx.Hash(), "err", err) + continue + } return err } if receipt.Status != types.ReceiptStatusSuccessful && !el.CanRevert { return errors.New("tx failed") } } else if el.Bundle != nil { - err := envDiff.commitSBundleInner(el.Bundle, chData, interrupt, key) + err := envDiff.commitSBundleInner(el.Bundle, chData, interrupt, key, algoConf) if err != nil { return err } From 0d612c6df98c07769b65f0614541ee605453ad32 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 9 Aug 2023 13:43:30 -0500 Subject: [PATCH 28/46] Add unit tests for state comparison, potential fix for gas and root mismatch through snapshot revert and removal of tx rollback --- core/state/statedb.go | 4 + miner/algo_common_test.go | 331 ++++++++++++++++++++++++++++++++++++++ miner/env_changes.go | 36 +---- 3 files changed, 336 insertions(+), 35 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 040f0f6ede..7b2df7cf75 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1222,3 +1222,7 @@ func (s *StateDB) MultiTxSnapshotCommit() (err error) { _, err = 
s.multiTxSnapshotStack.Commit() return } + +func (s *StateDB) MultiTxSnapshotStackSize() int { + return s.multiTxSnapshotStack.Size() +} diff --git a/miner/algo_common_test.go b/miner/algo_common_test.go index 8acb3dde6a..9f8630a151 100644 --- a/miner/algo_common_test.go +++ b/miner/algo_common_test.go @@ -1,9 +1,11 @@ package miner import ( + "bytes" "crypto/ecdsa" "errors" "fmt" + "github.com/ethereum/go-ethereum/rlp" "math/big" "testing" @@ -619,3 +621,332 @@ func TestPayoutTxUtils(t *testing.T) { require.Equal(t, env.state.GetNonce(signers.addresses[1]), uint64(3)) } + +const ( + Baseline = 0 + SingleSnapshot = 1 + MultiSnapshot = 2 +) + +type stateComparisonTestContext struct { + Name string + + statedb *state.StateDB + chainData chainData + signers signerList + + env *environment + + envDiff *environmentDiff + changes *envChanges + + transactions []*types.Transaction + bundles []types.SimulatedBundle + + rootHash common.Hash +} + +type stateComparisonTestContexts []stateComparisonTestContext + +func (sc stateComparisonTestContexts) ValidateRootHashes(t *testing.T, expected common.Hash) { + for _, tc := range sc { + require.Equal(t, expected.Bytes(), tc.rootHash.Bytes(), + "root hash mismatch for test context %s [expected: %s] [found: %s]", + tc.Name, expected.TerminalString(), tc.rootHash.TerminalString()) + } +} + +func (sc stateComparisonTestContexts) GenerateTransactions(t *testing.T, txCount int, failEveryN int) { + for tcIndex, tc := range sc { + signers := tc.signers + tc.transactions = sc.generateTransactions(txCount, failEveryN, signers) + tc.signers = signers + require.Len(t, tc.transactions, txCount) + + sc[tcIndex] = tc + } +} + +func (sc stateComparisonTestContexts) generateTransactions(txCount int, failEveryN int, signers signerList) []*types.Transaction { + transactions := make([]*types.Transaction, 0, txCount) + for i := 0; i < txCount; i++ { + var data []byte + if failEveryN != 0 && i%failEveryN == 0 { + data = []byte{0x01} + } else { + data = []byte{} + } + + from := i % len(signers.addresses) + tx := signers.signTx(from, params.TxGas, big.NewInt(0), big.NewInt(1), + signers.addresses[(i+1)%len(signers.addresses)], big.NewInt(0), data) + transactions = append(transactions, tx) + } + + return transactions +} + +func (sc stateComparisonTestContexts) UpdateRootHashes(t *testing.T) { + for tcIndex, tc := range sc { + if tc.envDiff != nil { + tc.rootHash = tc.envDiff.baseEnvironment.state.IntermediateRoot(true) + } else { + tc.rootHash = tc.env.state.IntermediateRoot(true) + } + sc[tcIndex] = tc + + require.NotEmpty(t, tc.rootHash.Bytes(), "root hash is empty for test context %s", tc.Name) + } +} + +func (sc stateComparisonTestContexts) ValidateTestCases(t *testing.T, reference int) { + expected := sc[reference] + var ( + expectedGasPool *core.GasPool = expected.envDiff.baseEnvironment.gasPool + expectedHeader *types.Header = expected.envDiff.baseEnvironment.header + expectedProfit *big.Int = expected.envDiff.baseEnvironment.profit + expectedTxCount int = expected.envDiff.baseEnvironment.tcount + expectedTransactions []*types.Transaction = expected.envDiff.baseEnvironment.txs + expectedReceipts types.Receipts = expected.envDiff.baseEnvironment.receipts + ) + for tcIndex, tc := range sc { + if tcIndex == reference { + continue + } + + var ( + actualGasPool *core.GasPool = tc.env.gasPool + actualHeader *types.Header = tc.env.header + actualProfit *big.Int = tc.env.profit + actualTxCount int = tc.env.tcount + actualTransactions []*types.Transaction = tc.env.txs + 
actualReceipts types.Receipts = tc.env.receipts + ) + if actualGasPool.Gas() != expectedGasPool.Gas() { + t.Errorf("gas pool mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, expectedGasPool.Gas(), actualGasPool.Gas()) + } + + if actualHeader.Hash() != expectedHeader.Hash() { + t.Errorf("header hash mismatch for test context %s [expected: %s] [found: %s]", + tc.Name, expectedHeader.Hash().TerminalString(), actualHeader.Hash().TerminalString()) + } + + if actualProfit.Cmp(expectedProfit) != 0 { + t.Errorf("profit mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, expectedProfit, actualProfit) + } + + if actualTxCount != expectedTxCount { + t.Errorf("transaction count mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, expectedTxCount, actualTxCount) + break + } + + if len(actualTransactions) != len(expectedTransactions) { + t.Errorf("transaction count mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, len(expectedTransactions), len(actualTransactions)) + } + + for txIdx := 0; txIdx < len(actualTransactions); txIdx++ { + expectedTx := expectedTransactions[txIdx] + actualTx := actualTransactions[txIdx] + + expectedBytes, err := rlp.EncodeToBytes(expectedTx) + if err != nil { + t.Fatalf("failed to encode expected transaction #%d: %v", txIdx, err) + } + + actualBytes, err := rlp.EncodeToBytes(actualTx) + if err != nil { + t.Fatalf("failed to encode actual transaction #%d: %v", txIdx, err) + } + + if !bytes.Equal(expectedBytes, actualBytes) { + t.Errorf("transaction #%d mismatch for test context %s [expected: %v] [found: %v]", + txIdx, tc.Name, expectedTx, actualTx) + } + } + + if len(actualReceipts) != len(expectedReceipts) { + t.Errorf("receipt count mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, len(expectedReceipts), len(actualReceipts)) + } + } +} + +func (sc stateComparisonTestContexts) Init(t *testing.T) stateComparisonTestContexts { + for i, tc := range sc { + tc = stateComparisonTestContext{} + tc.statedb, tc.chainData, tc.signers = genTestSetup() + tc.env = newEnvironment(tc.chainData, tc.statedb, tc.signers.addresses[0], GasLimit, big.NewInt(1)) + var err error + switch i { + case Baseline: + tc.Name = "baseline" + tc.envDiff = newEnvironmentDiff(tc.env) + case SingleSnapshot: + tc.Name = "single-snapshot" + tc.changes, err = newEnvChanges(tc.env) + case MultiSnapshot: + tc.Name = "multi-snapshot" + tc.changes, err = newEnvChanges(tc.env) + } + + require.NoError(t, err, "failed to initialize test contexts: %v", err) + sc[i] = tc + } + return sc +} + +func TestStateComparisons(t *testing.T) { + var testContexts = make(stateComparisonTestContexts, 3) + testContexts = testContexts.Init(t) + + // test commit tx + for i := 0; i < 3; i++ { + tx1 := testContexts[i].signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), + testContexts[i].signers.addresses[2], big.NewInt(0), []byte{}) + var ( + receipt *types.Receipt + status int + err error + ) + switch i { + case Baseline: + receipt, status, err = testContexts[i].envDiff.commitTx(tx1, testContexts[i].chainData) + testContexts[i].envDiff.applyToBaseEnv() + + case SingleSnapshot: + receipt, status, err = testContexts[i].changes.commitTx(tx1, testContexts[i].chainData) + require.NoError(t, err, "can't commit single snapshot tx") + + err = testContexts[i].changes.apply() + case MultiSnapshot: + receipt, status, err = testContexts[i].changes.commitTx(tx1, testContexts[i].chainData) + require.NoError(t, err, "can't commit multi snapshot tx") + + err = 
testContexts[i].changes.apply() + } + require.NoError(t, err, "can't commit tx") + require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status) + require.Equal(t, 21000, int(receipt.GasUsed)) + require.Equal(t, shiftTx, status) + } + + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, Baseline) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + + // test bundle + for i, tc := range testContexts { + var ( + signers = tc.signers + header = tc.env.header + env = tc.env + chData = tc.chainData + ) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + mevBundle := types.MevBundle{ + Txs: types.Transactions{tx1, tx2}, + BlockNumber: header.Number, + } + + simBundle, err := simulateBundle(env, mevBundle, chData, nil) + require.NoError(t, err, "can't simulate bundle: %v", err) + + switch i { + case Baseline: + err = tc.envDiff.commitBundle(&simBundle, chData, nil, defaultAlgorithmConfig) + tc.envDiff.applyToBaseEnv() + + case SingleSnapshot: + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, "can't create multi tx snapshot: %v", err) + + err = tc.changes.commitBundle(&simBundle, chData, defaultAlgorithmConfig) + if err != nil { + break + } + + err = tc.changes.apply() + + case MultiSnapshot: + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, "can't create multi tx snapshot: %v", err) + + err = tc.changes.commitBundle(&simBundle, chData, defaultAlgorithmConfig) + if err != nil { + break + } + + err = tc.changes.apply() + } + + require.NoError(t, err, "can't commit bundle: %v", err) + } + + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, 0) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + + // generate 100 transactions, with 50% of them failing + var ( + txCount = 100 + failEveryN = 2 + ) + testContexts = testContexts.Init(t) + testContexts.GenerateTransactions(t, txCount, failEveryN) + require.Len(t, testContexts[Baseline].transactions, txCount) + + for txIdx := 0; txIdx < txCount; txIdx++ { + for ctxIdx, tc := range testContexts { + tx := tc.transactions[txIdx] + + var commitErr error + switch ctxIdx { + case Baseline: + _, _, commitErr = tc.envDiff.commitTx(tx, tc.chainData) + tc.envDiff.applyToBaseEnv() + + case SingleSnapshot: + err := tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, "can't create multi tx snapshot for tx %d: %v", txIdx, err) + + _, _, commitErr = tc.changes.commitTx(tx, tc.chainData) + require.NoError(t, tc.changes.apply()) + case MultiSnapshot: + err := tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, + "can't create multi tx snapshot: %v", err) + + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, + "can't create multi tx snapshot: %v", err) + + _, _, commitErr = tc.changes.commitTx(tx, tc.chainData) + require.NoError(t, tc.changes.apply()) + + // NOTE(wazzymandias): At the time of writing this, the changes struct does not reset after performing + // an apply - because the intended use of the changes struct is to create it and discard it + // after every commit->(discard||apply) loop. + // So for now to test multiple snapshots we apply the changes for the top of the stack and + // then pop the underlying state snapshot from the base of the stack. 
+ // Otherwise, if changes are applied twice, then there can be double counting of transactions. + require.NoError(t, tc.changes.env.state.MultiTxSnapshotCommit()) + } + + if txIdx%failEveryN == 0 { + require.Errorf(t, commitErr, "tx %d should fail", txIdx) + } else { + require.NoError(t, commitErr, "tx %d should succeed, found: %v", txIdx, commitErr) + } + } + } + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, 0) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) +} diff --git a/miner/env_changes.go b/miner/env_changes.go index 195205e750..fd85b39ada 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -9,7 +9,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" ) @@ -57,13 +56,6 @@ func (c *envChanges) commitPayoutTx( } func (c *envChanges) commitTx(tx *types.Transaction, chData chainData) (*types.Receipt, int, error) { - var ( - gasPoolBefore = new(core.GasPool).AddGas(c.gasPool.Gas()) - usedGasBefore = c.usedGas - txsBefore = c.txs[:] - receiptsBefore = c.receipts[:] - profitBefore = new(big.Int).Set(c.profit) - ) signer := c.env.signer from, err := types.Sender(signer, tx) if err != nil { @@ -75,28 +67,9 @@ func (c *envChanges) commitTx(tx *types.Transaction, chData chainData) (*types.R return nil, shiftTx, err } - if _, in := chData.blacklist[from]; in { - return nil, popTx, errors.New("blacklist violation, tx.sender") - } - - if to := tx.To(); to != nil { - if _, in := chData.blacklist[*to]; in { - return nil, popTx, errors.New("blacklist violation, tx.to") - } - } - - cfg := *chData.chain.GetVMConfig() - // we set precompile to nil, but they are set in the validation code - // there will be no difference in the result if precompile is not it the blocklist - touchTracer := logger.NewAccessListTracer(nil, common.Address{}, common.Address{}, nil) - cfg.Tracer = touchTracer - cfg.Debug = true - c.env.state.SetTxContext(tx.Hash(), c.env.tcount+len(c.txs)) - receipt, err := core.ApplyTransaction(chData.chainConfig, chData.chain, &c.env.coinbase, c.gasPool, c.env.state, c.env.header, tx, &c.usedGas, cfg, nil) + receipt, _, err := applyTransactionWithBlacklist(signer, chData.chainConfig, chData.chain, &c.env.coinbase, c.gasPool, c.env.state, c.env.header, tx, &c.usedGas, *chData.chain.GetVMConfig(), chData.blacklist) if err != nil { - c.rollback(usedGasBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) - switch { case errors.Is(err, core.ErrGasLimitReached): // Pop the current out-of-gas transaction without shifting in the next from the account @@ -126,13 +99,6 @@ func (c *envChanges) commitTx(tx *types.Transaction, chData chainData) (*types.R } } - for _, accessTuple := range touchTracer.AccessList() { - if _, in := chData.blacklist[accessTuple.Address]; in { - c.rollback(usedGasBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) - return nil, popTx, errors.New("blacklist violation, tx trace") - } - } - c.profit.Add(c.profit, new(big.Int).Mul(new(big.Int).SetUint64(receipt.GasUsed), gasPrice)) c.txs = append(c.txs, tx) c.receipts = append(c.receipts, receipt) From fc32a84e178b32581d118f7cf26055de5606d5dd Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 9 Aug 2023 13:56:15 -0500 Subject: [PATCH 29/46] Fix linter error --- miner/algo_common_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/miner/algo_common_test.go b/miner/algo_common_test.go index 9f8630a151..8957bc51d7 100644 --- a/miner/algo_common_test.go +++ b/miner/algo_common_test.go @@ -5,10 +5,11 @@ import ( "crypto/ecdsa" "errors" "fmt" - "github.com/ethereum/go-ethereum/rlp" "math/big" "testing" + "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/require" mapset "github.com/deckarep/golang-set/v2" @@ -641,7 +642,6 @@ type stateComparisonTestContext struct { changes *envChanges transactions []*types.Transaction - bundles []types.SimulatedBundle rootHash common.Hash } @@ -776,8 +776,8 @@ func (sc stateComparisonTestContexts) ValidateTestCases(t *testing.T, reference } func (sc stateComparisonTestContexts) Init(t *testing.T) stateComparisonTestContexts { - for i, tc := range sc { - tc = stateComparisonTestContext{} + for i := range sc { + tc := stateComparisonTestContext{} tc.statedb, tc.chainData, tc.signers = genTestSetup() tc.env = newEnvironment(tc.chainData, tc.statedb, tc.signers.addresses[0], GasLimit, big.NewInt(1)) var err error From bdc74228b959af89c8c7f32673c812e3a030aa43 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 9 Aug 2023 15:46:21 -0500 Subject: [PATCH 30/46] Try using top level changes to avoid env mutations --- miner/algo_common.go | 51 ++++++++++++++++------- miner/env_changes.go | 96 +++++++++++++++++++++++++++++++++++--------- 2 files changed, 114 insertions(+), 33 deletions(-) diff --git a/miner/algo_common.go b/miner/algo_common.go index 57aab284ab..f3e49cfc31 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -367,19 +367,31 @@ func BuildMultiTxSnapBlock( buildBlockErrors []error ) + changes, err := newEnvChanges(inputEnvironment) + if err != nil { + return nil, nil, err + } + opMap := map[bool]func() error{ + true: changes.env.state.MultiTxSnapshotRevert, + false: changes.env.state.MultiTxSnapshotCommit, + } + for { order := orders.Peek() if order == nil { break } - orderFailed = false - changes, err := newEnvChanges(inputEnvironment) - // if changes cannot be instantiated, return early - if err != nil { - log.Error("Failed to create changes", "err", err) + if err = changes.env.state.NewMultiTxSnapshot(); err != nil { return nil, nil, err } + orderFailed = false + //changes, err := newEnvChanges(inputEnvironment) + // if changes cannot be instantiated, return early + //if err != nil { + // log.Error("Failed to create changes", "err", err) + // return nil, nil, err + //} if tx := order.Tx(); tx != nil { _, skip, err := changes.commitTx(tx, chData) @@ -423,17 +435,26 @@ func BuildMultiTxSnapBlock( panic("unsupported order type found") } - if orderFailed { - if err = changes.discard(); err != nil { - log.Error("Failed to discard changes with multi-transaction snapshot", "err", err) - buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to discard changes: %w", err)) - } - } else { - if err = changes.apply(); err != nil { - log.Error("Failed to apply changes with multi-transaction snapshot", "err", err) - buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to apply changes: %w", err)) - } + if err = opMap[orderFailed](); err != nil { + log.Error("Failed to apply changes with multi-transaction snapshot", "err", err) + buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to apply changes: %w", err)) } + //if orderFailed { + // if err = changes.discard(); err != nil { + // log.Error("Failed to discard changes with multi-transaction snapshot", "err", err) + // buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to 
discard changes: %w", err)) + // } + //} else { + // if err = changes.apply(); err != nil { + // log.Error("Failed to apply changes with multi-transaction snapshot", "err", err) + // buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to apply changes: %w", err)) + // } + //} + } + + if err = changes.apply(); err != nil { + log.Error("Failed to apply changes with multi-transaction snapshot", "err", err) + buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to apply changes: %w", err)) } return usedBundles, usedSbundles, errors.Join(buildBlockErrors...) diff --git a/miner/env_changes.go b/miner/env_changes.go index fd85b39ada..ff2c02d0c3 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -120,6 +120,8 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat ) for _, tx := range bundle.OriginalBundle.Txs { + gasUsed := c.usedGas + gasPool := new(core.GasPool).AddGas(c.gasPool.Gas()) txHash := tx.Hash() // TODO: Checks for base fee and dynamic fee txs should be moved to the transaction pool, // similar to mev-share bundles. See SBundlesPool.validateTx() for reference. @@ -140,36 +142,94 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat break } } + if err := c.env.state.NewMultiTxSnapshot(); err != nil { + bundleErr = err + break + } receipt, _, err := c.commitTx(tx, chData) - - if err != nil { + switch err { + case nil: + switch { + case receipt == nil: + panic("receipt is nil") + case receipt.Status == types.ReceiptStatusFailed: + isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) + // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one + if algoConf.DropRevertibleTxOnErr && isRevertibleTx { + log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", + "tx", txHash, "err", err) + c.usedGas = gasUsed + c.gasPool = gasPool + if err := c.env.state.MultiTxSnapshotRevert(); err != nil { + panic(err) + } + } else { + bundleErr = errors.New("bundle tx revert") + } + case receipt.Status == types.ReceiptStatusSuccessful: + fallthrough + default: + if err := c.env.state.MultiTxSnapshotCommit(); err != nil { + panic(err) + } + } + default: isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one if algoConf.DropRevertibleTxOnErr && isRevertibleTx { log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", "tx", txHash, "err", err) - continue - } - log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) - bundleErr = err - break - } - - if receipt != nil { - if receipt.Status == types.ReceiptStatusFailed && !bundle.OriginalBundle.RevertingHash(txHash) { - // if transaction reverted and isn't specified as reverting hash, return error - log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) - bundleErr = errors.New("bundle tx revert") + c.usedGas = gasUsed + c.gasPool = gasPool + if err := c.env.state.MultiTxSnapshotRevert(); err != nil { + panic(err) + } + } else { + bundleErr = err } - } else { - // NOTE: The expectation is that a receipt is only nil if an error occurred. - // If there is no error but receipt is nil, there is likely a programming error. 
- bundleErr = errors.New("invalid receipt when no error occurred") } if bundleErr != nil { + if err := c.env.state.MultiTxSnapshotRevert(); err != nil { + panic(err) + } break } + + //if err != nil { + // isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) + // // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one + // if algoConf.DropRevertibleTxOnErr && isRevertibleTx { + // log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", + // "tx", txHash, "err", err) + // if err := c.env.state.MultiTxSnapshotRevert(); err != nil { + // panic(err) + // } + // continue + // } + // log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) + // bundleErr = err + // break + //} + // + //if receipt != nil { + // if receipt.Status == types.ReceiptStatusFailed && !bundle.OriginalBundle.RevertingHash(txHash) { + // // if transaction reverted and isn't specified as reverting hash, return error + // log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) + // bundleErr = errors.New("bundle tx revert") + // } + //} else { + // // NOTE: The expectation is that a receipt is only nil if an error occurred. + // // If there is no error but receipt is nil, there is likely a programming error. + // bundleErr = errors.New("invalid receipt when no error occurred") + //} + // + //if bundleErr != nil { + // if err := c.env.state.MultiTxSnapshotRevert(); err != nil { + // panic(err) + // } + // break + //} } if bundleErr != nil { From e526d0ece2a5cd3c043b76e1fccb208c0d1e300d Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 9 Aug 2023 18:35:48 -0500 Subject: [PATCH 31/46] Debug commit --- builder/service.go | 2 +- builder/utils.go | 26 +++++++++-- miner/env_changes.go | 107 ++++++++++++++++++++++++------------------- 3 files changed, 82 insertions(+), 53 deletions(-) diff --git a/builder/service.go b/builder/service.go index 55f4300c67..7cd777ded5 100644 --- a/builder/service.go +++ b/builder/service.go @@ -206,7 +206,7 @@ func Register(stack *node.Node, backend *eth.Ethereum, cfg *Config) error { } var validator *blockvalidation.BlockValidationAPI - if cfg.DryRun { + if cfg.DryRun || !cfg.DryRun { var accessVerifier *blockvalidation.AccessVerifier if cfg.ValidationBlocklist != "" { accessVerifier, err = blockvalidation.NewAccessVerifierFromFile(cfg.ValidationBlocklist) diff --git a/builder/utils.go b/builder/utils.go index 284285cf4e..0871ce73a6 100644 --- a/builder/utils.go +++ b/builder/utils.go @@ -7,12 +7,20 @@ import ( "encoding/json" "errors" "fmt" + "github.com/ethereum/go-ethereum/log" "io" "net/http" ) var errHTTPErrorResponse = errors.New("HTTP error response") +type JSONRPCResponse struct { + ID interface{} `json:"id"` + Result json.RawMessage `json:"result,omitempty"` + Error json.RawMessage `json:"error,omitempty"` + Version string `json:"jsonrpc"` +} + // SendSSZRequest is a request to send SSZ data to a remote relay. 
func SendSSZRequest(ctx context.Context, client http.Client, method, url string, payload []byte, useGzip bool) (code int, err error) { var req *http.Request @@ -55,11 +63,21 @@ func SendSSZRequest(ctx context.Context, client http.Client, method, url string, } defer resp.Body.Close() + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return resp.StatusCode, fmt.Errorf("could not read error response body for status code %d: %w", resp.StatusCode, err) + } + + res := new(JSONRPCResponse) + if err := json.Unmarshal(bodyBytes, &res); err != nil { + return resp.StatusCode, fmt.Errorf("could not unmarshal error response body for status code %d: %w", resp.StatusCode, err) + } + + if res.Error != nil { + log.Info("Error response", "code", resp.StatusCode, "message", string(res.Error)) + } + if resp.StatusCode > 299 { - bodyBytes, err := io.ReadAll(resp.Body) - if err != nil { - return resp.StatusCode, fmt.Errorf("could not read error response body for status code %d: %w", resp.StatusCode, err) - } return resp.StatusCode, fmt.Errorf("HTTP error response: %d / %s", resp.StatusCode, string(bodyBytes)) } return resp.StatusCode, nil diff --git a/miner/env_changes.go b/miner/env_changes.go index ff2c02d0c3..d7798998a1 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -120,9 +120,9 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat ) for _, tx := range bundle.OriginalBundle.Txs { - gasUsed := c.usedGas - gasPool := new(core.GasPool).AddGas(c.gasPool.Gas()) - txHash := tx.Hash() + //gasUsed := c.usedGas + //gasPool := new(core.GasPool).AddGas(c.gasPool.Gas()) + //txHash := tx.Hash() // TODO: Checks for base fee and dynamic fee txs should be moved to the transaction pool, // similar to mev-share bundles. See SBundlesPool.validateTx() for reference. 
if hasBaseFee && tx.Type() == types.DynamicFeeTxType { @@ -143,58 +143,69 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat } } if err := c.env.state.NewMultiTxSnapshot(); err != nil { - bundleErr = err - break + panic(err) } receipt, _, err := c.commitTx(tx, chData) - switch err { - case nil: - switch { - case receipt == nil: - panic("receipt is nil") - case receipt.Status == types.ReceiptStatusFailed: - isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) - // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one - if algoConf.DropRevertibleTxOnErr && isRevertibleTx { - log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", - "tx", txHash, "err", err) - c.usedGas = gasUsed - c.gasPool = gasPool - if err := c.env.state.MultiTxSnapshotRevert(); err != nil { - panic(err) - } - } else { - bundleErr = errors.New("bundle tx revert") - } - case receipt.Status == types.ReceiptStatusSuccessful: - fallthrough - default: - if err := c.env.state.MultiTxSnapshotCommit(); err != nil { - panic(err) - } - } - default: - isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) - // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one - if algoConf.DropRevertibleTxOnErr && isRevertibleTx { - log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", - "tx", txHash, "err", err) - c.usedGas = gasUsed - c.gasPool = gasPool - if err := c.env.state.MultiTxSnapshotRevert(); err != nil { - panic(err) - } - } else { - bundleErr = err - } - } - - if bundleErr != nil { + if err != nil { + bundleErr = err if err := c.env.state.MultiTxSnapshotRevert(); err != nil { panic(err) } break + } else { + if err := c.env.state.MultiTxSnapshotCommit(); err != nil { + panic(fmt.Sprintf("err: %v, receipt: %v", err, receipt)) + } + } + //switch err { + //case nil: + // switch { + // case receipt == nil: + // panic("receipt is nil") + // case receipt.Status == types.ReceiptStatusFailed: + // isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) + // // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one + // if algoConf.DropRevertibleTxOnErr && isRevertibleTx { + // log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", + // "tx", txHash, "err", err) + // c.usedGas = gasUsed + // c.gasPool = gasPool + // if err := c.env.state.MultiTxSnapshotRevert(); err != nil { + // panic(err) + // } + // } else { + // bundleErr = errors.New("bundle tx revert") + // } + // case receipt.Status == types.ReceiptStatusSuccessful: + // fallthrough + // default: + // if err := c.env.state.MultiTxSnapshotCommit(); err != nil { + // panic(err) + // } + // } + //default: + // isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) + // // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one + // if algoConf.DropRevertibleTxOnErr && isRevertibleTx { + // log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", + // "tx", txHash, "err", err) + // c.usedGas = gasUsed + // c.gasPool = gasPool + // if err := c.env.state.MultiTxSnapshotRevert(); err != nil { + // panic(err) + // } + // } else { + // bundleErr = err + // } + //} + + //if bundleErr != nil { + // if err := c.env.state.MultiTxSnapshotRevert(); err != nil { + // 
panic(err) + // } + // break + //} //if err != nil { // isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) From da5c54ed59ee5812118efbedcb58641c3d27a32c Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 9 Aug 2023 20:19:16 -0500 Subject: [PATCH 32/46] DRY profit logic --- builder/service.go | 2 +- builder/utils.go | 26 +++----------------- miner/algo_common.go | 35 ++++++++++++++++++++++++++ miner/env_changes.go | 27 +++++++++++--------- miner/environment_diff.go | 52 ++++++++++++++------------------------- 5 files changed, 74 insertions(+), 68 deletions(-) diff --git a/builder/service.go b/builder/service.go index 7cd777ded5..55f4300c67 100644 --- a/builder/service.go +++ b/builder/service.go @@ -206,7 +206,7 @@ func Register(stack *node.Node, backend *eth.Ethereum, cfg *Config) error { } var validator *blockvalidation.BlockValidationAPI - if cfg.DryRun || !cfg.DryRun { + if cfg.DryRun { var accessVerifier *blockvalidation.AccessVerifier if cfg.ValidationBlocklist != "" { accessVerifier, err = blockvalidation.NewAccessVerifierFromFile(cfg.ValidationBlocklist) diff --git a/builder/utils.go b/builder/utils.go index 0871ce73a6..284285cf4e 100644 --- a/builder/utils.go +++ b/builder/utils.go @@ -7,20 +7,12 @@ import ( "encoding/json" "errors" "fmt" - "github.com/ethereum/go-ethereum/log" "io" "net/http" ) var errHTTPErrorResponse = errors.New("HTTP error response") -type JSONRPCResponse struct { - ID interface{} `json:"id"` - Result json.RawMessage `json:"result,omitempty"` - Error json.RawMessage `json:"error,omitempty"` - Version string `json:"jsonrpc"` -} - // SendSSZRequest is a request to send SSZ data to a remote relay. func SendSSZRequest(ctx context.Context, client http.Client, method, url string, payload []byte, useGzip bool) (code int, err error) { var req *http.Request @@ -63,21 +55,11 @@ func SendSSZRequest(ctx context.Context, client http.Client, method, url string, } defer resp.Body.Close() - bodyBytes, err := io.ReadAll(resp.Body) - if err != nil { - return resp.StatusCode, fmt.Errorf("could not read error response body for status code %d: %w", resp.StatusCode, err) - } - - res := new(JSONRPCResponse) - if err := json.Unmarshal(bodyBytes, &res); err != nil { - return resp.StatusCode, fmt.Errorf("could not unmarshal error response body for status code %d: %w", resp.StatusCode, err) - } - - if res.Error != nil { - log.Info("Error response", "code", resp.StatusCode, "message", string(res.Error)) - } - if resp.StatusCode > 299 { + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return resp.StatusCode, fmt.Errorf("could not read error response body for status code %d: %w", resp.StatusCode, err) + } return resp.StatusCode, fmt.Errorf("HTTP error response: %d / %s", resp.StatusCode, string(bodyBytes)) } return resp.StatusCode, nil diff --git a/miner/algo_common.go b/miner/algo_common.go index f3e49cfc31..14d8f21331 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -170,6 +170,41 @@ func NewBuildBlockFunc( } } +func ValidateGasPriceAndProfit(algoConf algorithmConfig, actualPrice, expectedPrice *big.Int, tolerablePriceDifferencePercent int, + actualProfit, expectedProfit *big.Int) error { + // allow tolerablePriceDifferencePercent % divergence + expectedPriceMultiple := new(big.Int).Mul(expectedPrice, big.NewInt(100-int64(tolerablePriceDifferencePercent))) + actualPriceMultiple := new(big.Int).Mul(actualPrice, common.Big100) + + var errLowProfit *lowProfitError = nil + if expectedPriceMultiple.Cmp(actualPriceMultiple) > 0 { + errLowProfit = 
&lowProfitError{ + ExpectedEffectiveGasPrice: expectedPrice, + ActualEffectiveGasPrice: actualPrice, + } + } + + if algoConf.EnforceProfit { + // We want to make expected profit smaller to allow for some leeway in cases where the actual profit is + // lower due to transaction ordering + expectedProfitMultiple := common.PercentOf(expectedProfit, algoConf.ProfitThresholdPercent) + actualProfitMultiple := new(big.Int).Mul(actualProfit, common.Big100) + + if expectedProfitMultiple.Cmp(actualProfitMultiple) > 0 { + if errLowProfit == nil { + errLowProfit = new(lowProfitError) + } + errLowProfit.ExpectedProfit = expectedProfit + errLowProfit.ActualProfit = actualProfit + } + } + + if errLowProfit != nil { // staticcheck linter complains if we don't check for nil here + return errLowProfit + } + return nil +} + func checkInterrupt(i *int32) bool { return i != nil && atomic.LoadInt32(i) != commitInterruptNone } diff --git a/miner/env_changes.go b/miner/env_changes.go index d7798998a1..053ac8b538 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -156,7 +156,6 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat if err := c.env.state.MultiTxSnapshotCommit(); err != nil { panic(fmt.Sprintf("err: %v, receipt: %v", err, receipt)) } - } //switch err { //case nil: @@ -252,22 +251,28 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat bundleProfit = new(big.Int).Sub(c.env.state.GetBalance(c.env.coinbase), coinbaseBefore) gasUsed = c.usedGas - gasUsedBefore - simEffGP = new(big.Int).Set(bundle.MevGasPrice) - effGP *big.Int + // EGP = Effective Gas Price (Profit / GasUsed) + simulatedEGP = new(big.Int).Set(bundle.MevGasPrice) + actualEGP *big.Int + tolerablePriceDifferencePercent = 1 + + simulatedBundleProfit = new(big.Int).Set(bundle.TotalEth) + actualBundleProfit = new(big.Int).Set(bundleProfit) ) + if gasUsed == 0 { - effGP = new(big.Int).SetUint64(0) + actualEGP = big.NewInt(0) } else { - effGP = new(big.Int).Div(bundleProfit, new(big.Int).SetUint64(gasUsed)) + actualEGP = new(big.Int).Div(bundleProfit, big.NewInt(int64(gasUsed))) } - // allow >-1% divergence - effGP.Mul(effGP, common.Big100) - simEffGP.Mul(simEffGP, big.NewInt(99)) - if simEffGP.Cmp(effGP) > 0 { - log.Trace("Bundle underpays after inclusion", "bundle", bundle.OriginalBundle.Hash) + err := ValidateGasPriceAndProfit(algoConf, + actualEGP, simulatedEGP, tolerablePriceDifferencePercent, + actualBundleProfit, simulatedBundleProfit, + ) + if err != nil { c.rollback(gasUsedBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) - return errors.New("bundle underpays") + return err } c.profit.Add(profitBefore, bundleProfit) diff --git a/miner/environment_diff.go b/miner/environment_diff.go index 238a47b774..daefffa1e6 100644 --- a/miner/environment_diff.go +++ b/miner/environment_diff.go @@ -195,45 +195,29 @@ func (envDiff *environmentDiff) commitBundle(bundle *types.SimulatedBundle, chDa coinbaseBalanceDelta := new(big.Int).Sub(coinbaseBalanceAfter, coinbaseBalanceBefore) tmpEnvDiff.newProfit.Add(profitBefore, coinbaseBalanceDelta) - bundleProfit := coinbaseBalanceDelta + var ( + bundleProfit = coinbaseBalanceDelta + // EGP = Effective Gas Price (Profit / GasUsed) + simulatedEGP = new(big.Int).Set(bundle.MevGasPrice) + actualEGP *big.Int + tolerablePriceDifferencePercent = 1 + + simulatedBundleProfit = new(big.Int).Set(bundle.TotalEth) + actualBundleProfit = new(big.Int).Set(bundleProfit) + ) - var bundleActualEffGP *big.Int if gasUsed == 0 { - 
bundleActualEffGP = big.NewInt(0) + actualEGP = big.NewInt(0) } else { - bundleActualEffGP = bundleProfit.Div(bundleProfit, big.NewInt(int64(gasUsed))) - } - bundleSimEffGP := new(big.Int).Set(bundle.MevGasPrice) - - // allow >-1% divergence - actualEGP := new(big.Int).Mul(bundleActualEffGP, common.Big100) // bundle actual effective gas price * 100 - simulatedEGP := new(big.Int).Mul(bundleSimEffGP, big.NewInt(99)) // bundle simulated effective gas price * 99 - - if simulatedEGP.Cmp(actualEGP) > 0 { - log.Trace("Bundle underpays after inclusion", "bundle", bundle.OriginalBundle.Hash) - return &lowProfitError{ - ExpectedEffectiveGasPrice: bundleSimEffGP, - ActualEffectiveGasPrice: bundleActualEffGP, - } + actualEGP = new(big.Int).Div(bundleProfit, big.NewInt(int64(gasUsed))) } - if algoConf.EnforceProfit { - // if profit is enforced between simulation and actual commit, only allow ProfitThresholdPercent divergence - simulatedBundleProfit := new(big.Int).Set(bundle.TotalEth) - actualBundleProfit := new(big.Int).Mul(bundleActualEffGP, big.NewInt(int64(gasUsed))) - - // We want to make simulated profit smaller to allow for some leeway in cases where the actual profit is - // lower due to transaction ordering - simulatedProfitMultiple := common.PercentOf(simulatedBundleProfit, algoConf.ProfitThresholdPercent) - actualProfitMultiple := new(big.Int).Mul(actualBundleProfit, common.Big100) - - if simulatedProfitMultiple.Cmp(actualProfitMultiple) > 0 { - log.Trace("Lower bundle profit found after inclusion", "bundle", bundle.OriginalBundle.Hash) - return &lowProfitError{ - ExpectedProfit: simulatedBundleProfit, - ActualProfit: actualBundleProfit, - } - } + err := ValidateGasPriceAndProfit(algoConf, + actualEGP, simulatedEGP, tolerablePriceDifferencePercent, + actualBundleProfit, simulatedBundleProfit, + ) + if err != nil { + return err } *envDiff = *tmpEnvDiff From ab3aaaf52c312c9b47cbd7ce3ea33fe62d596e29 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Tue, 15 Aug 2023 09:11:52 -0500 Subject: [PATCH 33/46] Revert commit This rolls back to commit before fc32a84e178b32581d118f7cf26055de5606d5dd. 
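For reference while reading this revert and the later patches that keep touching the same code path: the ValidateGasPriceAndProfit helper introduced in PATCH 32 reduces the ">-1% divergence" rule to comparisons of scaled integers, so no floating point is involved. Below is a minimal, self-contained sketch of that comparison; the function and variable names are illustrative only and are not part of the builder's API.

package main

import (
	"fmt"
	"math/big"
)

// withinTolerance reports whether actual is at most tolerancePercent below expected,
// using the same scaled-integer comparison as the patch: expected*(100-tol) <= actual*100.
func withinTolerance(actual, expected *big.Int, tolerancePercent int64) bool {
	lhs := new(big.Int).Mul(expected, big.NewInt(100-tolerancePercent))
	rhs := new(big.Int).Mul(actual, big.NewInt(100))
	return lhs.Cmp(rhs) <= 0
}

func main() {
	simulated := big.NewInt(1000) // simulated effective gas price
	fmt.Println(withinTolerance(big.NewInt(995), simulated, 1)) // true: within the 1% tolerance
	fmt.Println(withinTolerance(big.NewInt(980), simulated, 1)) // false: more than 1% below simulation
}

The profit half of the check in ValidateGasPriceAndProfit has the same shape, with algoConf.ProfitThresholdPercent (via common.PercentOf) taking the place of the fixed 1% price tolerance.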
--- builder/builder.go | 5 + miner/algo_common.go | 23 +--- miner/algo_common_test.go | 263 ++++++++++++++++++++------------------ miner/env_changes.go | 122 ++++-------------- 4 files changed, 175 insertions(+), 238 deletions(-) diff --git a/builder/builder.go b/builder/builder.go index 44201bcdeb..ba174317b1 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -324,6 +324,11 @@ func (b *Builder) submitCapellaBlock(block *types.Block, blockValue *big.Int, or log.Error("could not validate block for capella", "err", err) } } else { + err = b.validator.ValidateBuilderSubmissionV2(&blockvalidation.BuilderBlockValidationRequestV2{SubmitBlockRequest: blockSubmitReq, RegisteredGasLimit: vd.GasLimit}) + if err != nil { + log.Error("could not validate block for capella", "err", err) + return err + } go b.ds.ConsumeBuiltBlock(block, blockValue, ordersClosedAt, sealedAt, commitedBundles, allBundles, usedSbundles, &blockBidMsg) err = b.relay.SubmitBlockCapella(&blockSubmitReq, vd) if err != nil { diff --git a/miner/algo_common.go b/miner/algo_common.go index 14d8f21331..2d27086b75 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -178,6 +178,7 @@ func ValidateGasPriceAndProfit(algoConf algorithmConfig, actualPrice, expectedPr var errLowProfit *lowProfitError = nil if expectedPriceMultiple.Cmp(actualPriceMultiple) > 0 { + fmt.Println("egp diff", expectedPriceMultiple.String(), actualPriceMultiple.String()) errLowProfit = &lowProfitError{ ExpectedEffectiveGasPrice: expectedPrice, ActualEffectiveGasPrice: actualPrice, @@ -194,6 +195,7 @@ func ValidateGasPriceAndProfit(algoConf algorithmConfig, actualPrice, expectedPr if errLowProfit == nil { errLowProfit = new(lowProfitError) } + fmt.Println("profit diff") errLowProfit.ExpectedProfit = expectedProfit errLowProfit.ActualProfit = actualProfit } @@ -417,16 +419,12 @@ func BuildMultiTxSnapBlock( break } + orderFailed = false + // if snapshot cannot be instantiated, return early if err = changes.env.state.NewMultiTxSnapshot(); err != nil { + log.Error("Failed to create snapshot", "err", err) return nil, nil, err } - orderFailed = false - //changes, err := newEnvChanges(inputEnvironment) - // if changes cannot be instantiated, return early - //if err != nil { - // log.Error("Failed to create changes", "err", err) - // return nil, nil, err - //} if tx := order.Tx(); tx != nil { _, skip, err := changes.commitTx(tx, chData) @@ -474,17 +472,6 @@ func BuildMultiTxSnapBlock( log.Error("Failed to apply changes with multi-transaction snapshot", "err", err) buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to apply changes: %w", err)) } - //if orderFailed { - // if err = changes.discard(); err != nil { - // log.Error("Failed to discard changes with multi-transaction snapshot", "err", err) - // buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to discard changes: %w", err)) - // } - //} else { - // if err = changes.apply(); err != nil { - // log.Error("Failed to apply changes with multi-transaction snapshot", "err", err) - // buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to apply changes: %w", err)) - // } - //} } if err = changes.apply(); err != nil { diff --git a/miner/algo_common_test.go b/miner/algo_common_test.go index 8957bc51d7..f2bdda0447 100644 --- a/miner/algo_common_test.go +++ b/miner/algo_common_test.go @@ -788,9 +788,11 @@ func (sc stateComparisonTestContexts) Init(t *testing.T) stateComparisonTestCont case SingleSnapshot: tc.Name = "single-snapshot" tc.changes, err = newEnvChanges(tc.env) + 
_ = tc.changes.env.state.MultiTxSnapshotCommit() case MultiSnapshot: tc.Name = "multi-snapshot" tc.changes, err = newEnvChanges(tc.env) + _ = tc.changes.env.state.MultiTxSnapshotCommit() } require.NoError(t, err, "failed to initialize test contexts: %v", err) @@ -801,152 +803,165 @@ func (sc stateComparisonTestContexts) Init(t *testing.T) stateComparisonTestCont func TestStateComparisons(t *testing.T) { var testContexts = make(stateComparisonTestContexts, 3) - testContexts = testContexts.Init(t) // test commit tx - for i := 0; i < 3; i++ { - tx1 := testContexts[i].signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), - testContexts[i].signers.addresses[2], big.NewInt(0), []byte{}) - var ( - receipt *types.Receipt - status int - err error - ) - switch i { - case Baseline: - receipt, status, err = testContexts[i].envDiff.commitTx(tx1, testContexts[i].chainData) - testContexts[i].envDiff.applyToBaseEnv() + t.Run("state-compare-commit-tx", func(t *testing.T) { + testContexts = testContexts.Init(t) + for i := 0; i < 3; i++ { + tx1 := testContexts[i].signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), + testContexts[i].signers.addresses[2], big.NewInt(0), []byte{}) + var ( + receipt *types.Receipt + status int + err error + ) + switch i { + case Baseline: + receipt, status, err = testContexts[i].envDiff.commitTx(tx1, testContexts[i].chainData) + testContexts[i].envDiff.applyToBaseEnv() - case SingleSnapshot: - receipt, status, err = testContexts[i].changes.commitTx(tx1, testContexts[i].chainData) - require.NoError(t, err, "can't commit single snapshot tx") + case SingleSnapshot: + require.NoError(t, testContexts[i].changes.env.state.NewMultiTxSnapshot(), "can't create multi tx snapshot: %v", err) + receipt, status, err = testContexts[i].changes.commitTx(tx1, testContexts[i].chainData) + require.NoError(t, err, "can't commit single snapshot tx") - err = testContexts[i].changes.apply() - case MultiSnapshot: - receipt, status, err = testContexts[i].changes.commitTx(tx1, testContexts[i].chainData) - require.NoError(t, err, "can't commit multi snapshot tx") + err = testContexts[i].changes.apply() + case MultiSnapshot: + require.NoError(t, testContexts[i].changes.env.state.NewMultiTxSnapshot(), "can't create multi tx snapshot: %v", err) + receipt, status, err = testContexts[i].changes.commitTx(tx1, testContexts[i].chainData) + require.NoError(t, err, "can't commit multi snapshot tx") - err = testContexts[i].changes.apply() + err = testContexts[i].changes.apply() + } + require.NoError(t, err, "can't commit tx") + require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status) + require.Equal(t, 21000, int(receipt.GasUsed)) + require.Equal(t, shiftTx, status) } - require.NoError(t, err, "can't commit tx") - require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status) - require.Equal(t, 21000, int(receipt.GasUsed)) - require.Equal(t, shiftTx, status) - } - testContexts.UpdateRootHashes(t) - testContexts.ValidateTestCases(t, Baseline) - testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, Baseline) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + }) // test bundle - for i, tc := range testContexts { - var ( - signers = tc.signers - header = tc.env.header - env = tc.env - chData = tc.chainData - ) - - tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) - tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], 
big.NewInt(0), []byte{}) + t.Run("state-compare-bundle", func(t *testing.T) { + testContexts = testContexts.Init(t) + for i, tc := range testContexts { + var ( + signers = tc.signers + header = tc.env.header + env = tc.env + chData = tc.chainData + ) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + mevBundle := types.MevBundle{ + Txs: types.Transactions{tx1, tx2}, + BlockNumber: header.Number, + } - mevBundle := types.MevBundle{ - Txs: types.Transactions{tx1, tx2}, - BlockNumber: header.Number, - } + simBundle, err := simulateBundle(env, mevBundle, chData, nil) + require.NoError(t, err, "can't simulate bundle: %v", err) - simBundle, err := simulateBundle(env, mevBundle, chData, nil) - require.NoError(t, err, "can't simulate bundle: %v", err) + switch i { + case Baseline: + err = tc.envDiff.commitBundle(&simBundle, chData, nil, defaultAlgorithmConfig) + if err != nil { + break + } + tc.envDiff.applyToBaseEnv() - switch i { - case Baseline: - err = tc.envDiff.commitBundle(&simBundle, chData, nil, defaultAlgorithmConfig) - tc.envDiff.applyToBaseEnv() + case SingleSnapshot: + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, "can't create multi tx snapshot: %v", err) - case SingleSnapshot: - err = tc.changes.env.state.NewMultiTxSnapshot() - require.NoError(t, err, "can't create multi tx snapshot: %v", err) + err = tc.changes.commitBundle(&simBundle, chData, defaultAlgorithmConfig) + if err != nil { + break + } - err = tc.changes.commitBundle(&simBundle, chData, defaultAlgorithmConfig) - if err != nil { - break - } + err = tc.changes.apply() - err = tc.changes.apply() + case MultiSnapshot: + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, "can't create multi tx snapshot: %v", err) - case MultiSnapshot: - err = tc.changes.env.state.NewMultiTxSnapshot() - require.NoError(t, err, "can't create multi tx snapshot: %v", err) + err = tc.changes.commitBundle(&simBundle, chData, defaultAlgorithmConfig) + if err != nil { + break + } - err = tc.changes.commitBundle(&simBundle, chData, defaultAlgorithmConfig) - if err != nil { - break + err = tc.changes.apply() } - err = tc.changes.apply() + require.NoError(t, err, "can't commit bundle: %v", err) } - require.NoError(t, err, "can't commit bundle: %v", err) - } - - testContexts.UpdateRootHashes(t) - testContexts.ValidateTestCases(t, 0) - testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) - - // generate 100 transactions, with 50% of them failing - var ( - txCount = 100 - failEveryN = 2 - ) - testContexts = testContexts.Init(t) - testContexts.GenerateTransactions(t, txCount, failEveryN) - require.Len(t, testContexts[Baseline].transactions, txCount) - - for txIdx := 0; txIdx < txCount; txIdx++ { - for ctxIdx, tc := range testContexts { - tx := tc.transactions[txIdx] - - var commitErr error - switch ctxIdx { - case Baseline: - _, _, commitErr = tc.envDiff.commitTx(tx, tc.chainData) - tc.envDiff.applyToBaseEnv() - - case SingleSnapshot: - err := tc.changes.env.state.NewMultiTxSnapshot() - require.NoError(t, err, "can't create multi tx snapshot for tx %d: %v", txIdx, err) - - _, _, commitErr = tc.changes.commitTx(tx, tc.chainData) - require.NoError(t, tc.changes.apply()) - case MultiSnapshot: - err := tc.changes.env.state.NewMultiTxSnapshot() - require.NoError(t, err, - "can't create multi tx snapshot: %v", err) - - err = 
tc.changes.env.state.NewMultiTxSnapshot() - require.NoError(t, err, - "can't create multi tx snapshot: %v", err) - - _, _, commitErr = tc.changes.commitTx(tx, tc.chainData) - require.NoError(t, tc.changes.apply()) - - // NOTE(wazzymandias): At the time of writing this, the changes struct does not reset after performing - // an apply - because the intended use of the changes struct is to create it and discard it - // after every commit->(discard||apply) loop. - // So for now to test multiple snapshots we apply the changes for the top of the stack and - // then pop the underlying state snapshot from the base of the stack. - // Otherwise, if changes are applied twice, then there can be double counting of transactions. - require.NoError(t, tc.changes.env.state.MultiTxSnapshotCommit()) - } + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, 0) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + }) - if txIdx%failEveryN == 0 { - require.Errorf(t, commitErr, "tx %d should fail", txIdx) - } else { - require.NoError(t, commitErr, "tx %d should succeed, found: %v", txIdx, commitErr) + // test failed transactions + t.Run("state-compare-failed-txs", func(t *testing.T) { + // generate 100 transactions, with 50% of them failing + var ( + txCount = 100 + failEveryN = 2 + ) + testContexts = testContexts.Init(t) + testContexts.GenerateTransactions(t, txCount, failEveryN) + require.Len(t, testContexts[Baseline].transactions, txCount) + + for txIdx := 0; txIdx < txCount; txIdx++ { + for ctxIdx, tc := range testContexts { + tx := tc.transactions[txIdx] + + var commitErr error + switch ctxIdx { + case Baseline: + _, _, commitErr = tc.envDiff.commitTx(tx, tc.chainData) + tc.envDiff.applyToBaseEnv() + + case SingleSnapshot: + err := tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, "can't create multi tx snapshot for tx %d: %v", txIdx, err) + + _, _, commitErr = tc.changes.commitTx(tx, tc.chainData) + require.NoError(t, tc.changes.apply()) + case MultiSnapshot: + err := tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, + "can't create multi tx snapshot: %v", err) + + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, + "can't create multi tx snapshot: %v", err) + + _, _, commitErr = tc.changes.commitTx(tx, tc.chainData) + require.NoError(t, tc.changes.apply()) + + // NOTE(wazzymandias): At the time of writing this, the changes struct does not reset after performing + // an apply - because the intended use of the changes struct is to create it and discard it + // after every commit->(discard||apply) loop. + // So for now to test multiple snapshots we apply the changes for the top of the stack and + // then pop the underlying state snapshot from the base of the stack. + // Otherwise, if changes are applied twice, then there can be double counting of transactions. 
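// Hedged sketch of the snapshot-stack lifecycle the note above describes, using only calls
// already exercised in this test; the ordering reflects the author's stated intent and is
// not verified beyond what the surrounding code shows:
//
//	state.NewMultiTxSnapshot()     // push snapshot A (base of the stack)
//	state.NewMultiTxSnapshot()     // push snapshot B (top of the stack)
//	changes.commitTx(tx, chData)   // mutate state on top of snapshot B
//	changes.apply()                // apply accumulated changes; per the note, consumes the top snapshot
//	state.MultiTxSnapshotCommit()  // pop the remaining base snapshot so changes are not replayed twice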
+ require.NoError(t, tc.changes.env.state.MultiTxSnapshotCommit()) + } + + if txIdx%failEveryN == 0 { + require.Errorf(t, commitErr, "tx %d should fail", txIdx) + } else { + require.NoError(t, commitErr, "tx %d should succeed, found: %v", txIdx, commitErr) + } } } - } - testContexts.UpdateRootHashes(t) - testContexts.ValidateTestCases(t, 0) - testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, 0) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + }) } diff --git a/miner/env_changes.go b/miner/env_changes.go index 053ac8b538..c21b2c42d1 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -120,9 +120,7 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat ) for _, tx := range bundle.OriginalBundle.Txs { - //gasUsed := c.usedGas - //gasPool := new(core.GasPool).AddGas(c.gasPool.Gas()) - //txHash := tx.Hash() + txHash := tx.Hash() // TODO: Checks for base fee and dynamic fee txs should be moved to the transaction pool, // similar to mev-share bundles. See SBundlesPool.validateTx() for reference. if hasBaseFee && tx.Type() == types.DynamicFeeTxType { @@ -142,104 +140,36 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat break } } - if err := c.env.state.NewMultiTxSnapshot(); err != nil { - panic(err) - } receipt, _, err := c.commitTx(tx, chData) + if err != nil { - bundleErr = err - if err := c.env.state.MultiTxSnapshotRevert(); err != nil { - panic(err) + isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) + // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one + if algoConf.DropRevertibleTxOnErr && isRevertibleTx { + log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", + "tx", txHash, "err", err) + continue } + log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) + bundleErr = err break - } else { - if err := c.env.state.MultiTxSnapshotCommit(); err != nil { - panic(fmt.Sprintf("err: %v, receipt: %v", err, receipt)) + } + + if receipt != nil { + if receipt.Status == types.ReceiptStatusFailed && !bundle.OriginalBundle.RevertingHash(txHash) { + // if transaction reverted and isn't specified as reverting hash, return error + log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) + bundleErr = errors.New("bundle tx revert") } + } else { + // NOTE: The expectation is that a receipt is only nil if an error occurred. + // If there is no error but receipt is nil, there is likely a programming error. 
+ bundleErr = errors.New("invalid receipt when no error occurred") + } + + if bundleErr != nil { + break } - //switch err { - //case nil: - // switch { - // case receipt == nil: - // panic("receipt is nil") - // case receipt.Status == types.ReceiptStatusFailed: - // isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) - // // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one - // if algoConf.DropRevertibleTxOnErr && isRevertibleTx { - // log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", - // "tx", txHash, "err", err) - // c.usedGas = gasUsed - // c.gasPool = gasPool - // if err := c.env.state.MultiTxSnapshotRevert(); err != nil { - // panic(err) - // } - // } else { - // bundleErr = errors.New("bundle tx revert") - // } - // case receipt.Status == types.ReceiptStatusSuccessful: - // fallthrough - // default: - // if err := c.env.state.MultiTxSnapshotCommit(); err != nil { - // panic(err) - // } - // } - //default: - // isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) - // // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one - // if algoConf.DropRevertibleTxOnErr && isRevertibleTx { - // log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", - // "tx", txHash, "err", err) - // c.usedGas = gasUsed - // c.gasPool = gasPool - // if err := c.env.state.MultiTxSnapshotRevert(); err != nil { - // panic(err) - // } - // } else { - // bundleErr = err - // } - //} - - //if bundleErr != nil { - // if err := c.env.state.MultiTxSnapshotRevert(); err != nil { - // panic(err) - // } - // break - //} - - //if err != nil { - // isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) - // // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one - // if algoConf.DropRevertibleTxOnErr && isRevertibleTx { - // log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", - // "tx", txHash, "err", err) - // if err := c.env.state.MultiTxSnapshotRevert(); err != nil { - // panic(err) - // } - // continue - // } - // log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) - // bundleErr = err - // break - //} - // - //if receipt != nil { - // if receipt.Status == types.ReceiptStatusFailed && !bundle.OriginalBundle.RevertingHash(txHash) { - // // if transaction reverted and isn't specified as reverting hash, return error - // log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) - // bundleErr = errors.New("bundle tx revert") - // } - //} else { - // // NOTE: The expectation is that a receipt is only nil if an error occurred. - // // If there is no error but receipt is nil, there is likely a programming error. 
- // bundleErr = errors.New("invalid receipt when no error occurred") - //} - // - //if bundleErr != nil { - // if err := c.env.state.MultiTxSnapshotRevert(); err != nil { - // panic(err) - // } - // break - //} } if bundleErr != nil { @@ -261,7 +191,7 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat ) if gasUsed == 0 { - actualEGP = big.NewInt(0) + return errors.New("bundle gas used is 0") } else { actualEGP = new(big.Int).Div(bundleProfit, big.NewInt(int64(gasUsed))) } From 18231631ac7c0ef1e3d32c9155be459d68b9195f Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Sat, 19 Aug 2023 18:39:41 -0500 Subject: [PATCH 34/46] small changes --- miner/algo_common.go | 2 -- miner/env_changes.go | 7 ++++++- miner/environment_diff.go | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/miner/algo_common.go b/miner/algo_common.go index 2d27086b75..e0fdf5dfa4 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -178,7 +178,6 @@ func ValidateGasPriceAndProfit(algoConf algorithmConfig, actualPrice, expectedPr var errLowProfit *lowProfitError = nil if expectedPriceMultiple.Cmp(actualPriceMultiple) > 0 { - fmt.Println("egp diff", expectedPriceMultiple.String(), actualPriceMultiple.String()) errLowProfit = &lowProfitError{ ExpectedEffectiveGasPrice: expectedPrice, ActualEffectiveGasPrice: actualPrice, @@ -195,7 +194,6 @@ func ValidateGasPriceAndProfit(algoConf algorithmConfig, actualPrice, expectedPr if errLowProfit == nil { errLowProfit = new(lowProfitError) } - fmt.Println("profit diff") errLowProfit.ExpectedProfit = expectedProfit errLowProfit.ActualProfit = actualProfit } diff --git a/miner/env_changes.go b/miner/env_changes.go index c21b2c42d1..20cfb45131 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -99,7 +99,7 @@ func (c *envChanges) commitTx(tx *types.Transaction, chData chainData) (*types.R } } - c.profit.Add(c.profit, new(big.Int).Mul(new(big.Int).SetUint64(receipt.GasUsed), gasPrice)) + c.profit = c.profit.Add(c.profit, new(big.Int).Mul(new(big.Int).SetUint64(receipt.GasUsed), gasPrice)) c.txs = append(c.txs, tx) c.receipts = append(c.receipts, receipt) @@ -155,6 +155,10 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat break } + if bundleErr != nil { + break + } + if receipt != nil { if receipt.Status == types.ReceiptStatusFailed && !bundle.OriginalBundle.RevertingHash(txHash) { // if transaction reverted and isn't specified as reverting hash, return error @@ -191,6 +195,7 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat ) if gasUsed == 0 { + c.rollback(gasUsedBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) return errors.New("bundle gas used is 0") } else { actualEGP = new(big.Int).Div(bundleProfit, big.NewInt(int64(gasUsed))) diff --git a/miner/environment_diff.go b/miner/environment_diff.go index daefffa1e6..db036c2f04 100644 --- a/miner/environment_diff.go +++ b/miner/environment_diff.go @@ -207,7 +207,7 @@ func (envDiff *environmentDiff) commitBundle(bundle *types.SimulatedBundle, chDa ) if gasUsed == 0 { - actualEGP = big.NewInt(0) + return errors.New("bundle gas used is 0") } else { actualEGP = new(big.Int).Div(bundleProfit, big.NewInt(int64(gasUsed))) } From 72f182fb6deb6f66d488290769857b2eb7ef85d7 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Mon, 21 Aug 2023 11:46:50 -0500 Subject: [PATCH 35/46] Add fuzz state using state smart contract, add Copy method for multi-tx, pass gas limit for test setup, add abigen bindings and 
abi for compiled state fuzz test smart contract --- core/state/multi_tx_snapshot.go | 63 ++ core/state/statedb.go | 8 +- miner/algo_common_test.go | 371 +--------- miner/algo_greedy_test.go | 4 +- miner/algo_state_test.go | 855 +++++++++++++++++++++++ miner/algo_test.go | 4 +- miner/env_changes_test.go | 14 +- miner/environment_diff.go | 4 + miner/sbundle_test.go | 2 +- miner/state_fuzz_test_abigen_bindings.go | 401 +++++++++++ miner/testdata/state_fuzz_test.abi | 1 + miner/verify_bundles_test.go | 6 +- 12 files changed, 1356 insertions(+), 377 deletions(-) create mode 100644 miner/algo_state_test.go create mode 100644 miner/state_fuzz_test_abigen_bindings.go create mode 100644 miner/testdata/state_fuzz_test.abi diff --git a/core/state/multi_tx_snapshot.go b/core/state/multi_tx_snapshot.go index 022cb76220..b66a9ee764 100644 --- a/core/state/multi_tx_snapshot.go +++ b/core/state/multi_tx_snapshot.go @@ -54,6 +54,60 @@ func newMultiTxSnapshot() MultiTxSnapshot { } } +func (s MultiTxSnapshot) Copy() MultiTxSnapshot { + newSnapshot := newMultiTxSnapshot() + newSnapshot.invalid = s.invalid + + for txHash, numLogs := range s.numLogsAdded { + newSnapshot.numLogsAdded[txHash] = numLogs + } + + for address, object := range s.prevObjects { + newSnapshot.prevObjects[address] = object + } + + for address, storage := range s.accountStorage { + newSnapshot.accountStorage[address] = make(map[common.Hash]*common.Hash) + for key, value := range storage { + newSnapshot.accountStorage[address][key] = value + } + } + + for address, balance := range s.accountBalance { + newSnapshot.accountBalance[address] = balance + } + + for address, nonce := range s.accountNonce { + newSnapshot.accountNonce[address] = nonce + } + + for address, code := range s.accountCode { + newSnapshot.accountCode[address] = code + } + + for address, codeHash := range s.accountCodeHash { + newSnapshot.accountCodeHash[address] = codeHash + } + + for address, suicided := range s.accountSuicided { + newSnapshot.accountSuicided[address] = suicided + } + + for address, deleted := range s.accountDeleted { + newSnapshot.accountDeleted[address] = deleted + } + + for address := range s.accountNotPending { + newSnapshot.accountNotPending[address] = struct{}{} + } + + for address := range s.accountNotDirty { + newSnapshot.accountNotDirty[address] = struct{}{} + } + + return newSnapshot +} + // Equal returns true if the two MultiTxSnapshot are equal func (s *MultiTxSnapshot) Equal(other *MultiTxSnapshot) bool { if other == nil { @@ -385,6 +439,7 @@ func (s *MultiTxSnapshot) revertState(st *StateDB) { // restore storage for address, storage := range s.accountStorage { + st.stateObjects[address].dirtyStorage = make(Storage) for key, value := range storage { if value == nil { if _, ok := st.stateObjects[address].pendingStorage[key]; !ok { @@ -462,6 +517,14 @@ func (stack *MultiTxSnapshotStack) NewSnapshot() (*MultiTxSnapshot, error) { return &snap, nil } +func (stack *MultiTxSnapshotStack) Copy(statedb *StateDB) *MultiTxSnapshotStack { + newStack := NewMultiTxSnapshotStack(statedb) + for _, snapshot := range stack.snapshots { + newStack.snapshots = append(newStack.snapshots, snapshot.Copy()) + } + return newStack +} + // Peek returns the snapshot at the top of the stack. 
func (stack *MultiTxSnapshotStack) Peek() *MultiTxSnapshot { if len(stack.snapshots) == 0 { diff --git a/core/state/statedb.go b/core/state/statedb.go index 7b2df7cf75..0cc41cd630 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -720,10 +720,10 @@ func (s *StateDB) Copy() *StateDB { // Initialize new multi-transaction snapshot stack for the copied state // NOTE(wazzymandias): We avoid copying the snapshot stack from the original state // because it may contain snapshots that are not valid for the copied state. - if s.multiTxSnapshotStack.Size() > 0 { - panic("cannot copy state with active multi-transaction snapshot stack") - } - state.multiTxSnapshotStack = NewMultiTxSnapshotStack(state) + //if s.multiTxSnapshotStack.Size() > 0 { + // panic("cannot copy state with active multi-transaction snapshot stack") + //} + state.multiTxSnapshotStack = s.multiTxSnapshotStack.Copy(state) // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), diff --git a/miner/algo_common_test.go b/miner/algo_common_test.go index f2bdda0447..6eb2984179 100644 --- a/miner/algo_common_test.go +++ b/miner/algo_common_test.go @@ -1,15 +1,12 @@ package miner import ( - "bytes" "crypto/ecdsa" "errors" "fmt" "math/big" "testing" - "github.com/ethereum/go-ethereum/rlp" - "github.com/stretchr/testify/require" mapset "github.com/deckarep/golang-set/v2" @@ -187,21 +184,22 @@ func genGenesisAlloc(sign signerList, contractAddr []common.Address, contractCod return genesisAlloc } -func genTestSetup() (*state.StateDB, chainData, signerList) { +func genTestSetup(gasLimit uint64) (*state.StateDB, chainData, signerList) { config := params.AllEthashProtocolChanges signerList := genSignerList(10, params.AllEthashProtocolChanges) genesisAlloc := genGenesisAlloc(signerList, []common.Address{payProxyAddress, logContractAddress}, [][]byte{payProxyCode, logContractCode}) - stateDB, chainData := genTestSetupWithAlloc(config, genesisAlloc) + stateDB, chainData := genTestSetupWithAlloc(config, genesisAlloc, gasLimit) return stateDB, chainData, signerList } -func genTestSetupWithAlloc(config *params.ChainConfig, alloc core.GenesisAlloc) (*state.StateDB, chainData) { +func genTestSetupWithAlloc(config *params.ChainConfig, alloc core.GenesisAlloc, gasLimit uint64) (*state.StateDB, chainData) { db := rawdb.NewMemoryDatabase() gspec := &core.Genesis{ - Config: config, - Alloc: alloc, + Config: config, + Alloc: alloc, + GasLimit: gasLimit, } _ = gspec.MustCommit(db) @@ -237,7 +235,7 @@ func newEnvironment(data chainData, state *state.StateDB, coinbase common.Addres } func TestTxCommit(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) envDiff := newEnvironmentDiff(env) @@ -284,7 +282,7 @@ func TestTxCommit(t *testing.T) { func TestBundleCommit(t *testing.T) { algoConf := defaultAlgorithmConfig - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) envDiff := newEnvironmentDiff(env) @@ -319,7 +317,7 @@ func TestBundleCommit(t *testing.T) { } func TestErrorTxCommit(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], 
GasLimit, big.NewInt(1)) envDiff := newEnvironmentDiff(env) @@ -353,7 +351,7 @@ func TestErrorTxCommit(t *testing.T) { } func TestCommitTxOverGasLimit(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], 21000, big.NewInt(1)) envDiff := newEnvironmentDiff(env) @@ -381,7 +379,7 @@ func TestCommitTxOverGasLimit(t *testing.T) { } func TestErrorBundleCommit(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], 21000*2, big.NewInt(1)) envDiff := newEnvironmentDiff(env) @@ -443,7 +441,7 @@ func TestErrorBundleCommit(t *testing.T) { } func TestBlacklist(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) envDiff := newEnvironmentDiff(env) @@ -555,7 +553,7 @@ func TestGetSealingWorkAlgosWithProfit(t *testing.T) { func TestPayoutTxUtils(t *testing.T) { availableFunds := big.NewInt(50000000000000000) // 0.05 eth - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) @@ -622,346 +620,3 @@ func TestPayoutTxUtils(t *testing.T) { require.Equal(t, env.state.GetNonce(signers.addresses[1]), uint64(3)) } - -const ( - Baseline = 0 - SingleSnapshot = 1 - MultiSnapshot = 2 -) - -type stateComparisonTestContext struct { - Name string - - statedb *state.StateDB - chainData chainData - signers signerList - - env *environment - - envDiff *environmentDiff - changes *envChanges - - transactions []*types.Transaction - - rootHash common.Hash -} - -type stateComparisonTestContexts []stateComparisonTestContext - -func (sc stateComparisonTestContexts) ValidateRootHashes(t *testing.T, expected common.Hash) { - for _, tc := range sc { - require.Equal(t, expected.Bytes(), tc.rootHash.Bytes(), - "root hash mismatch for test context %s [expected: %s] [found: %s]", - tc.Name, expected.TerminalString(), tc.rootHash.TerminalString()) - } -} - -func (sc stateComparisonTestContexts) GenerateTransactions(t *testing.T, txCount int, failEveryN int) { - for tcIndex, tc := range sc { - signers := tc.signers - tc.transactions = sc.generateTransactions(txCount, failEveryN, signers) - tc.signers = signers - require.Len(t, tc.transactions, txCount) - - sc[tcIndex] = tc - } -} - -func (sc stateComparisonTestContexts) generateTransactions(txCount int, failEveryN int, signers signerList) []*types.Transaction { - transactions := make([]*types.Transaction, 0, txCount) - for i := 0; i < txCount; i++ { - var data []byte - if failEveryN != 0 && i%failEveryN == 0 { - data = []byte{0x01} - } else { - data = []byte{} - } - - from := i % len(signers.addresses) - tx := signers.signTx(from, params.TxGas, big.NewInt(0), big.NewInt(1), - signers.addresses[(i+1)%len(signers.addresses)], big.NewInt(0), data) - transactions = append(transactions, tx) - } - - return transactions -} - -func (sc stateComparisonTestContexts) UpdateRootHashes(t *testing.T) { - for tcIndex, tc := range sc { - if tc.envDiff != nil { - tc.rootHash = tc.envDiff.baseEnvironment.state.IntermediateRoot(true) - } else { - tc.rootHash = tc.env.state.IntermediateRoot(true) - } - sc[tcIndex] = tc - - require.NotEmpty(t, tc.rootHash.Bytes(), "root hash is empty for test context 
%s", tc.Name) - } -} - -func (sc stateComparisonTestContexts) ValidateTestCases(t *testing.T, reference int) { - expected := sc[reference] - var ( - expectedGasPool *core.GasPool = expected.envDiff.baseEnvironment.gasPool - expectedHeader *types.Header = expected.envDiff.baseEnvironment.header - expectedProfit *big.Int = expected.envDiff.baseEnvironment.profit - expectedTxCount int = expected.envDiff.baseEnvironment.tcount - expectedTransactions []*types.Transaction = expected.envDiff.baseEnvironment.txs - expectedReceipts types.Receipts = expected.envDiff.baseEnvironment.receipts - ) - for tcIndex, tc := range sc { - if tcIndex == reference { - continue - } - - var ( - actualGasPool *core.GasPool = tc.env.gasPool - actualHeader *types.Header = tc.env.header - actualProfit *big.Int = tc.env.profit - actualTxCount int = tc.env.tcount - actualTransactions []*types.Transaction = tc.env.txs - actualReceipts types.Receipts = tc.env.receipts - ) - if actualGasPool.Gas() != expectedGasPool.Gas() { - t.Errorf("gas pool mismatch for test context %s [expected: %d] [found: %d]", - tc.Name, expectedGasPool.Gas(), actualGasPool.Gas()) - } - - if actualHeader.Hash() != expectedHeader.Hash() { - t.Errorf("header hash mismatch for test context %s [expected: %s] [found: %s]", - tc.Name, expectedHeader.Hash().TerminalString(), actualHeader.Hash().TerminalString()) - } - - if actualProfit.Cmp(expectedProfit) != 0 { - t.Errorf("profit mismatch for test context %s [expected: %d] [found: %d]", - tc.Name, expectedProfit, actualProfit) - } - - if actualTxCount != expectedTxCount { - t.Errorf("transaction count mismatch for test context %s [expected: %d] [found: %d]", - tc.Name, expectedTxCount, actualTxCount) - break - } - - if len(actualTransactions) != len(expectedTransactions) { - t.Errorf("transaction count mismatch for test context %s [expected: %d] [found: %d]", - tc.Name, len(expectedTransactions), len(actualTransactions)) - } - - for txIdx := 0; txIdx < len(actualTransactions); txIdx++ { - expectedTx := expectedTransactions[txIdx] - actualTx := actualTransactions[txIdx] - - expectedBytes, err := rlp.EncodeToBytes(expectedTx) - if err != nil { - t.Fatalf("failed to encode expected transaction #%d: %v", txIdx, err) - } - - actualBytes, err := rlp.EncodeToBytes(actualTx) - if err != nil { - t.Fatalf("failed to encode actual transaction #%d: %v", txIdx, err) - } - - if !bytes.Equal(expectedBytes, actualBytes) { - t.Errorf("transaction #%d mismatch for test context %s [expected: %v] [found: %v]", - txIdx, tc.Name, expectedTx, actualTx) - } - } - - if len(actualReceipts) != len(expectedReceipts) { - t.Errorf("receipt count mismatch for test context %s [expected: %d] [found: %d]", - tc.Name, len(expectedReceipts), len(actualReceipts)) - } - } -} - -func (sc stateComparisonTestContexts) Init(t *testing.T) stateComparisonTestContexts { - for i := range sc { - tc := stateComparisonTestContext{} - tc.statedb, tc.chainData, tc.signers = genTestSetup() - tc.env = newEnvironment(tc.chainData, tc.statedb, tc.signers.addresses[0], GasLimit, big.NewInt(1)) - var err error - switch i { - case Baseline: - tc.Name = "baseline" - tc.envDiff = newEnvironmentDiff(tc.env) - case SingleSnapshot: - tc.Name = "single-snapshot" - tc.changes, err = newEnvChanges(tc.env) - _ = tc.changes.env.state.MultiTxSnapshotCommit() - case MultiSnapshot: - tc.Name = "multi-snapshot" - tc.changes, err = newEnvChanges(tc.env) - _ = tc.changes.env.state.MultiTxSnapshotCommit() - } - - require.NoError(t, err, "failed to initialize test contexts: %v", 
err) - sc[i] = tc - } - return sc -} - -func TestStateComparisons(t *testing.T) { - var testContexts = make(stateComparisonTestContexts, 3) - - // test commit tx - t.Run("state-compare-commit-tx", func(t *testing.T) { - testContexts = testContexts.Init(t) - for i := 0; i < 3; i++ { - tx1 := testContexts[i].signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), - testContexts[i].signers.addresses[2], big.NewInt(0), []byte{}) - var ( - receipt *types.Receipt - status int - err error - ) - switch i { - case Baseline: - receipt, status, err = testContexts[i].envDiff.commitTx(tx1, testContexts[i].chainData) - testContexts[i].envDiff.applyToBaseEnv() - - case SingleSnapshot: - require.NoError(t, testContexts[i].changes.env.state.NewMultiTxSnapshot(), "can't create multi tx snapshot: %v", err) - receipt, status, err = testContexts[i].changes.commitTx(tx1, testContexts[i].chainData) - require.NoError(t, err, "can't commit single snapshot tx") - - err = testContexts[i].changes.apply() - case MultiSnapshot: - require.NoError(t, testContexts[i].changes.env.state.NewMultiTxSnapshot(), "can't create multi tx snapshot: %v", err) - receipt, status, err = testContexts[i].changes.commitTx(tx1, testContexts[i].chainData) - require.NoError(t, err, "can't commit multi snapshot tx") - - err = testContexts[i].changes.apply() - } - require.NoError(t, err, "can't commit tx") - require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status) - require.Equal(t, 21000, int(receipt.GasUsed)) - require.Equal(t, shiftTx, status) - } - - testContexts.UpdateRootHashes(t) - testContexts.ValidateTestCases(t, Baseline) - testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) - }) - - // test bundle - t.Run("state-compare-bundle", func(t *testing.T) { - testContexts = testContexts.Init(t) - for i, tc := range testContexts { - var ( - signers = tc.signers - header = tc.env.header - env = tc.env - chData = tc.chainData - ) - - tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) - tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) - - mevBundle := types.MevBundle{ - Txs: types.Transactions{tx1, tx2}, - BlockNumber: header.Number, - } - - simBundle, err := simulateBundle(env, mevBundle, chData, nil) - require.NoError(t, err, "can't simulate bundle: %v", err) - - switch i { - case Baseline: - err = tc.envDiff.commitBundle(&simBundle, chData, nil, defaultAlgorithmConfig) - if err != nil { - break - } - tc.envDiff.applyToBaseEnv() - - case SingleSnapshot: - err = tc.changes.env.state.NewMultiTxSnapshot() - require.NoError(t, err, "can't create multi tx snapshot: %v", err) - - err = tc.changes.commitBundle(&simBundle, chData, defaultAlgorithmConfig) - if err != nil { - break - } - - err = tc.changes.apply() - - case MultiSnapshot: - err = tc.changes.env.state.NewMultiTxSnapshot() - require.NoError(t, err, "can't create multi tx snapshot: %v", err) - - err = tc.changes.commitBundle(&simBundle, chData, defaultAlgorithmConfig) - if err != nil { - break - } - - err = tc.changes.apply() - } - - require.NoError(t, err, "can't commit bundle: %v", err) - } - - testContexts.UpdateRootHashes(t) - testContexts.ValidateTestCases(t, 0) - testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) - }) - - // test failed transactions - t.Run("state-compare-failed-txs", func(t *testing.T) { - // generate 100 transactions, with 50% of them failing - var ( - txCount = 100 - failEveryN = 2 - ) - testContexts = 
testContexts.Init(t) - testContexts.GenerateTransactions(t, txCount, failEveryN) - require.Len(t, testContexts[Baseline].transactions, txCount) - - for txIdx := 0; txIdx < txCount; txIdx++ { - for ctxIdx, tc := range testContexts { - tx := tc.transactions[txIdx] - - var commitErr error - switch ctxIdx { - case Baseline: - _, _, commitErr = tc.envDiff.commitTx(tx, tc.chainData) - tc.envDiff.applyToBaseEnv() - - case SingleSnapshot: - err := tc.changes.env.state.NewMultiTxSnapshot() - require.NoError(t, err, "can't create multi tx snapshot for tx %d: %v", txIdx, err) - - _, _, commitErr = tc.changes.commitTx(tx, tc.chainData) - require.NoError(t, tc.changes.apply()) - case MultiSnapshot: - err := tc.changes.env.state.NewMultiTxSnapshot() - require.NoError(t, err, - "can't create multi tx snapshot: %v", err) - - err = tc.changes.env.state.NewMultiTxSnapshot() - require.NoError(t, err, - "can't create multi tx snapshot: %v", err) - - _, _, commitErr = tc.changes.commitTx(tx, tc.chainData) - require.NoError(t, tc.changes.apply()) - - // NOTE(wazzymandias): At the time of writing this, the changes struct does not reset after performing - // an apply - because the intended use of the changes struct is to create it and discard it - // after every commit->(discard||apply) loop. - // So for now to test multiple snapshots we apply the changes for the top of the stack and - // then pop the underlying state snapshot from the base of the stack. - // Otherwise, if changes are applied twice, then there can be double counting of transactions. - require.NoError(t, tc.changes.env.state.MultiTxSnapshotCommit()) - } - - if txIdx%failEveryN == 0 { - require.Errorf(t, commitErr, "tx %d should fail", txIdx) - } else { - require.NoError(t, commitErr, "tx %d should succeed, found: %v", txIdx, commitErr) - } - } - } - testContexts.UpdateRootHashes(t) - testContexts.ValidateTestCases(t, 0) - testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) - }) -} diff --git a/miner/algo_greedy_test.go b/miner/algo_greedy_test.go index 4b460f0adc..c7d6aae682 100644 --- a/miner/algo_greedy_test.go +++ b/miner/algo_greedy_test.go @@ -13,7 +13,7 @@ import ( func TestBuildBlockGasLimit(t *testing.T) { algos := []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS} for _, algo := range algos { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], 21000, big.NewInt(1)) txs := make(map[common.Address]types.Transactions) @@ -51,7 +51,7 @@ func TestBuildBlockGasLimit(t *testing.T) { } func TestTxWithMinerFeeHeap(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], 21000, big.NewInt(1)) diff --git a/miner/algo_state_test.go b/miner/algo_state_test.go new file mode 100644 index 0000000000..f999f9c8fa --- /dev/null +++ b/miner/algo_state_test.go @@ -0,0 +1,855 @@ +package miner + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + 
"github.com/stretchr/testify/require" + "math/big" + mathrand "math/rand" + "testing" +) + +// NOTE(wazzymandias): Below is a FuzzTest contract written in Solidity and shown here as reference code +// for the generated abi and bytecode used for testing. +// The generated abi can be found in the `testdata` directory. +// The abi, bytecode, and Go bindings were generated using the following commands: +// - docker run -v ${STATE_FUZZ_TEST_CONTRACT_DIRECTORY}:/sources +// ethereum/solc:0.8.19 -o /sources/output --abi --bin /sources/StateFuzzTest.sol +// - go run ./cmd/abigen/ --bin ${TARGET_STATE_FUZZ_TEST_BIN_PATH} --abi ${TARGET_STATE_FUZZ_TEST_ABI_PATH} +// --pkg statefuzztest --out=state_fuzz_test_abigen_bindings.go +const StateFuzzTestSolidity = ` +pragma solidity 0.8.19; + +contract StateFuzzTest { + mapping(address => uint256) public balances; + mapping(bytes32 => bytes) public storageData; + mapping(address => bool) public isSelfDestructed; + + function createObject(bytes32 key, bytes memory value) public { + storageData[key] = value; + } + + function resetObject(bytes32 key) public { + delete storageData[key]; + } + + function selfDestruct() public { + isSelfDestructed[msg.sender] = true; + selfdestruct(payable(msg.sender)); + } + + function changeBalance(address account, uint256 newBalance) public { + balances[account] = newBalance; + } + + function changeStorage(bytes32 key, bytes memory newValue) public { + storageData[key] = newValue; + } +} +` + +func changeBalanceFuzzTestContract(nonce uint64, to, address common.Address, newBalance *big.Int) (types.TxData, error) { + abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("changeBalance", address, newBalance) + if err != nil { + return nil, err + } + + return &types.LegacyTx{ + Nonce: nonce, + GasPrice: big.NewInt(1), + Gas: 10_000_000, + To: (*common.Address)(to[:]), + Value: big.NewInt(0), + Data: data, + }, nil +} + +func resetObjectFuzzTestContract(nonce uint64, address common.Address, key [32]byte) (types.TxData, error) { + abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("resetObject", key) + if err != nil { + return nil, err + } + + return &types.LegacyTx{ + Nonce: nonce, + GasPrice: big.NewInt(1), + Gas: 10_000_000, + To: (*common.Address)(address[:]), + Value: big.NewInt(0), + Data: data, + }, nil +} + +func createObjectFuzzTestContract(chainID *big.Int, nonce uint64, to common.Address, key [32]byte, value []byte) (types.TxData, error) { + abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("createObject", key, value) + if err != nil { + return nil, err + } + + return &types.DynamicFeeTx{ + ChainID: chainID, + Nonce: nonce, + Gas: 100_000, + GasFeeCap: big.NewInt(1), + To: (*common.Address)(to[:]), + Value: big.NewInt(0), + Data: data, + }, nil +} + +func selfDestructFuzzTestContract(chainID *big.Int, nonce uint64, to common.Address) (types.TxData, error) { + abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("selfDestruct") + if err != nil { + return nil, err + } + + return &types.DynamicFeeTx{ + ChainID: chainID, + Nonce: nonce, + Gas: 500_000, + GasFeeCap: big.NewInt(1), + To: (*common.Address)(to[:]), + Value: big.NewInt(0), + Data: data, + }, nil +} + +func changeStorageFuzzTestContract(chainID *big.Int, nonce uint64, to common.Address, key [32]byte, value []byte) (types.TxData, error) { + 
abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("changeStorage", key, value) + if err != nil { + return nil, err + } + + return &types.DynamicFeeTx{ + ChainID: chainID, + Nonce: nonce, + Gas: 100_000, + GasFeeCap: big.NewInt(1), + To: (*common.Address)(to[:]), + Value: big.NewInt(0), + Data: data, + }, nil +} + +const ( + Baseline = 0 + SingleSnapshot = 1 + MultiSnapshot = 2 +) + +type stateComparisonTestContext struct { + Name string + + statedb *state.StateDB + chainData chainData + signers signerList + + env *environment + + envDiff *environmentDiff + changes *envChanges + + transactions []*types.Transaction + + rootHash common.Hash +} + +type stateComparisonTestContexts []stateComparisonTestContext + +func (sc stateComparisonTestContexts) ValidateRootHashes(t *testing.T, expected common.Hash) { + for _, tc := range sc { + require.Equal(t, expected.Bytes(), tc.rootHash.Bytes(), + "root hash mismatch for test context %s [expected: %s] [found: %s]", + tc.Name, expected.TerminalString(), tc.rootHash.TerminalString()) + } +} + +func (sc stateComparisonTestContexts) GenerateTransactions(t *testing.T, txCount int, failEveryN int) { + for tcIndex, tc := range sc { + signers := tc.signers + tc.transactions = sc.generateTransactions(txCount, failEveryN, signers) + tc.signers = signers + require.Len(t, tc.transactions, txCount) + + sc[tcIndex] = tc + } +} + +func (sc stateComparisonTestContexts) generateTransactions(txCount int, failEveryN int, signers signerList) []*types.Transaction { + transactions := make([]*types.Transaction, 0, txCount) + for i := 0; i < txCount; i++ { + var data []byte + if failEveryN != 0 && i%failEveryN == 0 { + data = []byte{0x01} + } else { + data = []byte{} + } + + from := i % len(signers.addresses) + tx := signers.signTx(from, params.TxGas, big.NewInt(0), big.NewInt(1), + signers.addresses[(i+1)%len(signers.addresses)], big.NewInt(0), data) + transactions = append(transactions, tx) + } + + return transactions +} + +func (sc stateComparisonTestContexts) UpdateRootHashes(t *testing.T) { + for tcIndex, tc := range sc { + if tc.envDiff != nil { + tc.rootHash = tc.envDiff.baseEnvironment.state.IntermediateRoot(true) + } else { + tc.rootHash = tc.env.state.IntermediateRoot(true) + } + sc[tcIndex] = tc + + require.NotEmpty(t, tc.rootHash.Bytes(), "root hash is empty for test context %s", tc.Name) + } +} + +func (sc stateComparisonTestContexts) ValidateTestCases(t *testing.T, reference int) { + expected := sc[reference] + var ( + expectedGasPool *core.GasPool = expected.envDiff.baseEnvironment.gasPool + expectedHeader *types.Header = expected.envDiff.baseEnvironment.header + expectedProfit *big.Int = expected.envDiff.baseEnvironment.profit + expectedTxCount int = expected.envDiff.baseEnvironment.tcount + expectedTransactions []*types.Transaction = expected.envDiff.baseEnvironment.txs + expectedReceipts types.Receipts = expected.envDiff.baseEnvironment.receipts + ) + for tcIndex, tc := range sc { + if tcIndex == reference { + continue + } + + var ( + actualGasPool *core.GasPool = tc.env.gasPool + actualHeader *types.Header = tc.env.header + actualProfit *big.Int = tc.env.profit + actualTxCount int = tc.env.tcount + actualTransactions []*types.Transaction = tc.env.txs + actualReceipts types.Receipts = tc.env.receipts + ) + if actualGasPool.Gas() != expectedGasPool.Gas() { + t.Errorf("gas pool mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, expectedGasPool.Gas(), actualGasPool.Gas()) + } + + if 
actualHeader.Hash() != expectedHeader.Hash() { + t.Errorf("header hash mismatch for test context %s [expected: %s] [found: %s]", + tc.Name, expectedHeader.Hash().TerminalString(), actualHeader.Hash().TerminalString()) + } + + if actualProfit.Cmp(expectedProfit) != 0 { + t.Errorf("profit mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, expectedProfit, actualProfit) + } + + if actualTxCount != expectedTxCount { + t.Errorf("transaction count mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, expectedTxCount, actualTxCount) + break + } + + if len(actualTransactions) != len(expectedTransactions) { + t.Errorf("transaction count mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, len(expectedTransactions), len(actualTransactions)) + } + + for txIdx := 0; txIdx < len(actualTransactions); txIdx++ { + expectedTx := expectedTransactions[txIdx] + actualTx := actualTransactions[txIdx] + + expectedBytes, err := rlp.EncodeToBytes(expectedTx) + if err != nil { + t.Fatalf("failed to encode expected transaction #%d: %v", txIdx, err) + } + + actualBytes, err := rlp.EncodeToBytes(actualTx) + if err != nil { + t.Fatalf("failed to encode actual transaction #%d: %v", txIdx, err) + } + + if !bytes.Equal(expectedBytes, actualBytes) { + t.Errorf("transaction #%d mismatch for test context %s [expected: %v] [found: %v]", + txIdx, tc.Name, expectedTx, actualTx) + } + } + + if len(actualReceipts) != len(expectedReceipts) { + t.Errorf("receipt count mismatch for test context %s [expected: %d] [found: %d]", + tc.Name, len(expectedReceipts), len(actualReceipts)) + } + } +} + +func (sc stateComparisonTestContexts) Init(t *testing.T, gasLimit uint64) stateComparisonTestContexts { + for i := range sc { + tc := stateComparisonTestContext{} + tc.statedb, tc.chainData, tc.signers = genTestSetup(gasLimit) + tc.env = newEnvironment(tc.chainData, tc.statedb, tc.signers.addresses[0], gasLimit, big.NewInt(1)) + var err error + switch i { + case Baseline: + tc.Name = "baseline" + tc.envDiff = newEnvironmentDiff(tc.env) + case SingleSnapshot: + tc.Name = "single-snapshot" + tc.changes, err = newEnvChanges(tc.env) + _ = tc.changes.env.state.MultiTxSnapshotCommit() + case MultiSnapshot: + tc.Name = "multi-snapshot" + tc.changes, err = newEnvChanges(tc.env) + _ = tc.changes.env.state.MultiTxSnapshotCommit() + } + + require.NoError(t, err, "failed to initialize test contexts: %v", err) + sc[i] = tc + } + return sc +} + +func TestStateComparisons(t *testing.T) { + var testContexts = make(stateComparisonTestContexts, 3) + + // test commit tx + t.Run("state-compare-commit-tx", func(t *testing.T) { + testContexts = testContexts.Init(t, GasLimit) + for i := 0; i < 3; i++ { + tx1 := testContexts[i].signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), + testContexts[i].signers.addresses[2], big.NewInt(0), []byte{}) + var ( + receipt *types.Receipt + status int + err error + ) + switch i { + case Baseline: + receipt, status, err = testContexts[i].envDiff.commitTx(tx1, testContexts[i].chainData) + testContexts[i].envDiff.applyToBaseEnv() + + case SingleSnapshot: + require.NoError(t, testContexts[i].changes.env.state.NewMultiTxSnapshot(), "can't create multi tx snapshot: %v", err) + receipt, status, err = testContexts[i].changes.commitTx(tx1, testContexts[i].chainData) + require.NoError(t, err, "can't commit single snapshot tx") + + err = testContexts[i].changes.apply() + case MultiSnapshot: + require.NoError(t, testContexts[i].changes.env.state.NewMultiTxSnapshot(), "can't create multi tx 
snapshot: %v", err) + receipt, status, err = testContexts[i].changes.commitTx(tx1, testContexts[i].chainData) + require.NoError(t, err, "can't commit multi snapshot tx") + + err = testContexts[i].changes.apply() + } + require.NoError(t, err, "can't commit tx") + require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status) + require.Equal(t, 21000, int(receipt.GasUsed)) + require.Equal(t, shiftTx, status) + } + + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, Baseline) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + }) + + // test bundle + t.Run("state-compare-bundle", func(t *testing.T) { + testContexts = testContexts.Init(t, GasLimit) + for i, tc := range testContexts { + var ( + signers = tc.signers + header = tc.env.header + env = tc.env + chData = tc.chainData + ) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + mevBundle := types.MevBundle{ + Txs: types.Transactions{tx1, tx2}, + BlockNumber: header.Number, + } + + simBundle, err := simulateBundle(env, mevBundle, chData, nil) + require.NoError(t, err, "can't simulate bundle: %v", err) + + switch i { + case Baseline: + err = tc.envDiff.commitBundle(&simBundle, chData, nil, defaultAlgorithmConfig) + if err != nil { + break + } + tc.envDiff.applyToBaseEnv() + + case SingleSnapshot: + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, "can't create multi tx snapshot: %v", err) + + err = tc.changes.commitBundle(&simBundle, chData, defaultAlgorithmConfig) + if err != nil { + break + } + + err = tc.changes.apply() + + case MultiSnapshot: + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, "can't create multi tx snapshot: %v", err) + + err = tc.changes.commitBundle(&simBundle, chData, defaultAlgorithmConfig) + if err != nil { + break + } + + err = tc.changes.apply() + } + + require.NoError(t, err, "can't commit bundle: %v", err) + } + + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, 0) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + }) + + // test failed transactions + t.Run("state-compare-failed-txs", func(t *testing.T) { + // generate 100 transactions, with 50% of them failing + var ( + txCount = 100 + failEveryN = 2 + ) + testContexts = testContexts.Init(t, GasLimit) + testContexts.GenerateTransactions(t, txCount, failEveryN) + require.Len(t, testContexts[Baseline].transactions, txCount) + + for txIdx := 0; txIdx < txCount; txIdx++ { + for ctxIdx, tc := range testContexts { + tx := tc.transactions[txIdx] + + var commitErr error + switch ctxIdx { + case Baseline: + _, _, commitErr = tc.envDiff.commitTx(tx, tc.chainData) + tc.envDiff.applyToBaseEnv() + + case SingleSnapshot: + err := tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, "can't create multi tx snapshot for tx %d: %v", txIdx, err) + + _, _, commitErr = tc.changes.commitTx(tx, tc.chainData) + require.NoError(t, tc.changes.apply()) + case MultiSnapshot: + err := tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, + "can't create multi tx snapshot: %v", err) + + err = tc.changes.env.state.NewMultiTxSnapshot() + require.NoError(t, err, + "can't create multi tx snapshot: %v", err) + + _, _, commitErr = tc.changes.commitTx(tx, tc.chainData) + require.NoError(t, tc.changes.apply()) + + // NOTE(wazzymandias): At the time of writing this, the 
changes struct does not reset after performing + // an apply - because the intended use of the changes struct is to create it and discard it + // after every commit->(discard||apply) loop. + // So for now to test multiple snapshots we apply the changes for the top of the stack and + // then pop the underlying state snapshot from the base of the stack. + // Otherwise, if changes are applied twice, then there can be double counting of transactions. + require.NoError(t, tc.changes.env.state.MultiTxSnapshotCommit()) + } + + if txIdx%failEveryN == 0 { + require.Errorf(t, commitErr, "tx %d should fail", txIdx) + } else { + require.NoError(t, commitErr, "tx %d should succeed, found: %v", txIdx, commitErr) + } + } + } + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, 0) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + }) +} + +func TestBundles(t *testing.T) { + const maxGasLimit = 1_000_000_000_000 + + var testContexts = make(stateComparisonTestContexts, 3) + testContexts.Init(t, maxGasLimit) + + // Set up FuzzTest ABI and bytecode + abi, err := StatefuzztestMetaData.GetAbi() + require.NoError(t, err) + + fuzzTestSolBytecode := StatefuzztestMetaData.Bin + bytecodeBytes, err := hex.DecodeString(fuzzTestSolBytecode[2:]) + require.NoError(t, err) + + // FuzzTest constructor + deployData, err := abi.Pack("") + require.NoError(t, err) + + simulations := make([]*backends.SimulatedBackend, 3) + controlFuzzTestContracts := make(map[int][]*Statefuzztest, 3) + variantFuzzTestAddresses := make(map[int][]common.Address, 3) + + for tcIdx, tc := range testContexts { + disk := tc.env.state.Copy().Database().DiskDB() + db := rawdb.NewDatabase(disk) + + backend := backends.NewSimulatedBackendChain(db, tc.chainData.chain) + simulations[tcIdx] = backend + + s := tc.signers + controlFuzzTestContracts[tcIdx] = make([]*Statefuzztest, len(s.signers)) + variantFuzzTestAddresses[tcIdx] = make([]common.Address, len(s.signers)) + // commit transaction for deploying Fuzz Test contract + for i, pk := range s.signers { + deployTx := &types.LegacyTx{ + Nonce: s.nonces[i], + GasPrice: big.NewInt(1), + Gas: 10_000_000, + Value: big.NewInt(0), + To: nil, + Data: append(bytecodeBytes, deployData...), + } + + signTx := types.MustSignNewTx(pk, types.LatestSigner(s.config), deployTx) + + auth, err := bind.NewKeyedTransactorWithChainID(pk, tc.chainData.chainConfig.ChainID) + require.NoError(t, err) + + // deploy Fuzz Test contract to control chain (i.e, the chain we compare the test contexts against) + _, _, fuzz, err := DeployStatefuzztest(auth, backend) + require.NoError(t, err) + backend.Commit() + + controlFuzzTestContracts[tcIdx][i] = fuzz + + var receipt *types.Receipt + switch tcIdx { + case Baseline: + receipt, _, err = tc.envDiff.commitTx(signTx, tc.chainData) + require.NoError(t, err) + tc.envDiff.applyToBaseEnv() + + _, err = tc.envDiff.baseEnvironment.state.Commit(true) + case SingleSnapshot: + err = tc.env.state.NewMultiTxSnapshot() + require.NoError(t, err) + + receipt, _, err = tc.changes.commitTx(signTx, tc.chainData) + require.NoError(t, err) + + err = tc.changes.apply() + require.NoError(t, err) + + _, err = tc.changes.env.state.Commit(true) + + case MultiSnapshot: + err = tc.env.state.NewMultiTxSnapshot() + require.NoError(t, err) + + receipt, _, err = tc.changes.commitTx(signTx, tc.chainData) + require.NoError(t, err) + + err = tc.changes.apply() + require.NoError(t, err) + + _, err = tc.changes.env.state.Commit(true) + } + + require.NoError(t, err) + require.Equal(t, 
types.ReceiptStatusSuccessful, receipt.Status) + variantFuzzTestAddresses[tcIdx][i] = receipt.ContractAddress + + s.nonces[i]++ + } + } + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, Baseline) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + + // initialize fuzz test contract for each account with random objects through createObject function + const createObjectCount = 100 + var randCreateObjectKeys = [createObjectCount][32]byte{} + var randCreateObjectValues = [createObjectCount][32]byte{} + for i := 0; i < createObjectCount; i++ { + _, err := rand.Read(randCreateObjectKeys[i][:]) + require.NoError(t, err) + + _, err = rand.Read(randCreateObjectValues[i][:]) + require.NoError(t, err) + } + + for tcIdx, tc := range testContexts { + backend := simulations[tcIdx] + + t.Run(fmt.Sprintf("%s-create-object", tc.Name), func(t *testing.T) { + signers := tc.signers + for signerIdx, pk := range signers.signers { + var ( + actualTransactions = [createObjectCount]*types.Transaction{} + expectedTransactions = [createObjectCount]*types.Transaction{} + expectedReceipts = [createObjectCount]*types.Receipt{} + to = variantFuzzTestAddresses[tcIdx][signerIdx] + ) + auth, err := bind.NewKeyedTransactorWithChainID(pk, tc.chainData.chainConfig.ChainID) + require.NoError(t, err) + + for txIdx := 0; txIdx < createObjectCount; txIdx++ { + var ( + createObjKey = randCreateObjectKeys[txIdx] + createObjValue = randCreateObjectValues[txIdx] + ) + tx, err := createObjectFuzzTestContract( + tc.chainData.chainConfig.ChainID, signers.nonces[signerIdx], to, createObjKey, createObjValue[:]) + require.NoError(t, err) + + actualTx := types.MustSignNewTx(pk, types.LatestSigner(signers.config), tx) + actualTransactions[txIdx] = actualTx + + expectedTx, err := + controlFuzzTestContracts[tcIdx][signerIdx].CreateObject(auth, createObjKey, createObjValue[:]) + require.NoError(t, err) + + expectedTransactions[txIdx] = expectedTx + + require.Equal(t, expectedTx.Data(), actualTx.Data()) + require.Equal(t, expectedTx.Nonce(), actualTx.Nonce()) + require.Equal(t, expectedTx.To().String(), actualTx.To().String()) + + // commit transaction for control chain (i.e, what we compare the test contexts against) + backend.Commit() + expectedReceipt, err := backend.TransactionReceipt(context.Background(), expectedTransactions[txIdx].Hash()) + require.NoError(t, err) + require.Equal(t, types.ReceiptStatusSuccessful, expectedReceipt.Status) + + expectedReceipts[txIdx] = expectedReceipt + + // update nonce + signers.nonces[signerIdx]++ + } + + for txIdx := 0; txIdx < createObjectCount; txIdx++ { + actualTx := actualTransactions[txIdx] + var actualReceipt *types.Receipt + switch tcIdx { + case Baseline: + actualReceipt, _, err = tc.envDiff.commitTx(actualTx, tc.chainData) + tc.envDiff.applyToBaseEnv() + case SingleSnapshot: + err = tc.env.state.NewMultiTxSnapshot() + require.NoError(t, err) + + actualReceipt, _, err = tc.changes.commitTx(actualTx, tc.chainData) + require.NoError(t, err) + + err = tc.changes.apply() + case MultiSnapshot: + err = tc.env.state.NewMultiTxSnapshot() + require.NoError(t, err) + + err = tc.env.state.NewMultiTxSnapshot() + require.NoError(t, err) + + actualReceipt, _, err = tc.changes.commitTx(actualTx, tc.chainData) + require.NoError(t, err) + + err = tc.changes.apply() + require.NoError(t, err) + + err = tc.env.state.MultiTxSnapshotCommit() + } + + require.NoError(t, err) + + expectedReceipt := expectedReceipts[txIdx] + require.Equal(t, expectedReceipt.PostState, 
actualReceipt.PostState) + require.Equal(t, expectedReceipt.ContractAddress.String(), actualReceipt.ContractAddress.String()) + require.Equal(t, types.ReceiptStatusSuccessful, actualReceipt.Status, "test %s, signer %d", tc.Name, signerIdx) + } + } + }) + } + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, Baseline) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) + + // generate bundles of transactions, where each transaction will either: + // - change balance + // - create object + // - self-destruct + // - reset object + // - change storage + type TransactionOperation int + const ( + ChangeBalance TransactionOperation = iota + CreateObject + SelfDestruct + ResetObject + ChangeStorage + ) + const ( + bundleCount = 5 + bundleSize = 10 + ) + + bundles := [bundleCount]types.MevBundle{} + for bundleIdx := 0; bundleIdx < bundleCount; bundleIdx++ { + transactions := [bundleSize]*types.Transaction{} + for txIdx := 0; txIdx < bundleSize; txIdx++ { + var ( + // pick a random integer that represents one of the transactions we will create + n = mathrand.Intn(5) + s = testContexts[0].signers + chainID = s.config.ChainID + // choose a random To Address index + toAddressRandomIdx = mathrand.Intn(len(s.signers)) + // reference the correct nonce for the associated To Address + nonce = s.nonces[toAddressRandomIdx] + toAddress = s.addresses[toAddressRandomIdx] + + txData types.TxData + err error + ) + switch TransactionOperation(n) { + case ChangeBalance: // change balance + balanceAddressRandomIdx := mathrand.Intn(len(s.signers)) + balanceAddress := s.addresses[balanceAddressRandomIdx] + + randomBalance := new(big.Int).SetUint64(mathrand.Uint64()) + + txData, err = changeBalanceFuzzTestContract(nonce, toAddress, balanceAddress, randomBalance) + + case CreateObject: // create object + var ( + key [32]byte + value [32]byte + ) + _, err = rand.Read(key[:]) + require.NoError(t, err) + + _, err = rand.Read(value[:]) + require.NoError(t, err) + + txData, err = createObjectFuzzTestContract(chainID, nonce, toAddress, key, value[:]) + + case SelfDestruct: // self-destruct + txData, err = selfDestructFuzzTestContract(chainID, nonce, toAddress) + + case ResetObject: // reset object + var ( + resetObjectRandomIdx = mathrand.Intn(createObjectCount) + resetObjectKey = randCreateObjectKeys[resetObjectRandomIdx] + fuzzContractAddress = variantFuzzTestAddresses[0][toAddressRandomIdx] + ) + txData, err = resetObjectFuzzTestContract(nonce, fuzzContractAddress, resetObjectKey) + + case ChangeStorage: // change storage + var ( + changeStorageRandomIdx = mathrand.Intn(createObjectCount) + changeStorageObjectKey = randCreateObjectKeys[changeStorageRandomIdx] + fuzzContractAddress = variantFuzzTestAddresses[0][toAddressRandomIdx] + value [32]byte + ) + _, err = rand.Read(value[:]) + require.NoError(t, err) + + txData, err = changeStorageFuzzTestContract(chainID, nonce, fuzzContractAddress, changeStorageObjectKey, value[:]) + } + require.NoError(t, err) + + tx := types.MustSignNewTx(s.signers[toAddressRandomIdx], types.LatestSigner(s.config), txData) + transactions[txIdx] = tx + s.nonces[toAddressRandomIdx]++ + } + + bundles[bundleIdx] = types.MevBundle{ + Txs: transactions[:], + } + } + for tcIdx, tc := range testContexts { + algoConf := defaultAlgorithmConfig + algoConf.EnforceProfit = true + switch tcIdx { + case SingleSnapshot, MultiSnapshot: + err = tc.env.state.NewMultiTxSnapshot() + require.NoError(t, err) + } + + for _, b := range bundles { + sim, err := simulateBundle(tc.env, b, 
tc.chainData, nil) + + switch tcIdx { + case Baseline: + err = tc.envDiff.commitBundle(&sim, tc.chainData, nil, algoConf) + case SingleSnapshot: + err = tc.changes.commitBundle(&sim, tc.chainData, algoConf) + case MultiSnapshot: + err = tc.changes.commitBundle(&sim, tc.chainData, algoConf) + } + var pe *lowProfitError + if errors.As(err, &pe) || (err != nil && err.Error() == "bundle mev gas price is nil") { + continue + } else { + require.NoError(t, err, "test %s", tc.Name) + } + } + + switch tcIdx { + case Baseline: + tc.envDiff.applyToBaseEnv() + case SingleSnapshot: + err = tc.changes.apply() + require.NoError(t, err) + case MultiSnapshot: + err = tc.changes.apply() + require.NoError(t, err) + } + } + testContexts.UpdateRootHashes(t) + testContexts.ValidateTestCases(t, Baseline) + testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) +} diff --git a/miner/algo_test.go b/miner/algo_test.go index ed1a644b64..5e3251467b 100644 --- a/miner/algo_test.go +++ b/miner/algo_test.go @@ -300,7 +300,7 @@ func runAlgoTest( txPool map[common.Address]types.Transactions, bundles []types.SimulatedBundle, header *types.Header, scale int, ) (gotProfit *big.Int, err error) { var ( - statedb, chData = genTestSetupWithAlloc(config, alloc) + statedb, chData = genTestSetupWithAlloc(config, alloc, GasLimit) env = newEnvironment(chData, statedb, header.Coinbase, header.GasLimit*uint64(scale), header.BaseFee) resultEnv *environment ) @@ -326,7 +326,7 @@ func runAlgoTest( // simulateBundles simulates bundles and returns the simulated bundles. func simulateBundles(config *params.ChainConfig, header *types.Header, alloc core.GenesisAlloc, bundles []types.MevBundle) ([]types.SimulatedBundle, error) { var ( - statedb, chData = genTestSetupWithAlloc(config, alloc) + statedb, chData = genTestSetupWithAlloc(config, alloc, GasLimit) env = newEnvironment(chData, statedb, header.Coinbase, header.GasLimit, header.BaseFee) simBundles = make([]types.SimulatedBundle, 0) diff --git a/miner/env_changes_test.go b/miner/env_changes_test.go index b7bbed4571..e5b4fc2740 100644 --- a/miner/env_changes_test.go +++ b/miner/env_changes_test.go @@ -10,7 +10,7 @@ import ( ) func TestTxCommitSnaps(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) tx := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) @@ -58,7 +58,7 @@ func TestTxCommitSnaps(t *testing.T) { } } func TestBundleCommitSnaps(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) algoConf := defaultAlgorithmConfig algoConf.EnableMultiTxSnap = true @@ -99,7 +99,7 @@ func TestBundleCommitSnaps(t *testing.T) { } func TestErrorTxCommitSnaps(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) changes, err := newEnvChanges(env) @@ -136,7 +136,7 @@ func TestErrorTxCommitSnaps(t *testing.T) { } func TestCommitTxOverGasLimitSnaps(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], 21000, big.NewInt(1)) changes, err := newEnvChanges(env) @@ -167,7 +167,7 @@ func TestCommitTxOverGasLimitSnaps(t *testing.T) { } func TestErrorBundleCommitSnaps(t 
*testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) algoConf := defaultAlgorithmConfig algoConf.EnableMultiTxSnap = true @@ -235,7 +235,7 @@ func TestErrorBundleCommitSnaps(t *testing.T) { } func TestErrorSBundleCommitSnaps(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], 21000*2, big.NewInt(1)) changes, err := newEnvChanges(env) @@ -310,7 +310,7 @@ func TestErrorSBundleCommitSnaps(t *testing.T) { } func TestBlacklistSnaps(t *testing.T) { - statedb, chData, signers := genTestSetup() + statedb, chData, signers := genTestSetup(GasLimit) // NOTE: intermediate root hash MUST be generated before env changes are instantiated, otherwise state.MultiTxSnapshot // will be invalidated and the test will fail diff --git a/miner/environment_diff.go b/miner/environment_diff.go index db036c2f04..b769264746 100644 --- a/miner/environment_diff.go +++ b/miner/environment_diff.go @@ -195,6 +195,10 @@ func (envDiff *environmentDiff) commitBundle(bundle *types.SimulatedBundle, chDa coinbaseBalanceDelta := new(big.Int).Sub(coinbaseBalanceAfter, coinbaseBalanceBefore) tmpEnvDiff.newProfit.Add(profitBefore, coinbaseBalanceDelta) + if bundle.MevGasPrice == nil { + return errors.New("bundle mev gas price is nil") + } + var ( bundleProfit = coinbaseBalanceDelta // EGP = Effective Gas Price (Profit / GasUsed) diff --git a/miner/sbundle_test.go b/miner/sbundle_test.go index 4278db2461..6cf34a2b6b 100644 --- a/miner/sbundle_test.go +++ b/miner/sbundle_test.go @@ -452,7 +452,7 @@ func TestSBundles(t *testing.T) { var ( config = params.TestChainConfig signer = types.LatestSigner(config) - statedb, chData = genTestSetupWithAlloc(config, testSuite.GenesisAlloc) + statedb, chData = genTestSetupWithAlloc(config, testSuite.GenesisAlloc, GasLimit) env = newEnvironment(chData, statedb, testSuite.Header.Coinbase, testSuite.Header.GasLimit, testSuite.Header.BaseFee) envDiff = newEnvironmentDiff(env) diff --git a/miner/state_fuzz_test_abigen_bindings.go b/miner/state_fuzz_test_abigen_bindings.go new file mode 100644 index 0000000000..1f2d076628 --- /dev/null +++ b/miner/state_fuzz_test_abigen_bindings.go @@ -0,0 +1,401 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package miner + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// StatefuzztestMetaData contains all meta data concerning the Statefuzztest contract. 
+var StatefuzztestMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"balances\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"changeBalance\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"key\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"newValue\",\"type\":\"bytes\"}],\"name\":\"changeStorage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"key\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"value\",\"type\":\"bytes\"}],\"name\":\"createObject\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"isSelfDestructed\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"key\",\"type\":\"bytes32\"}],\"name\":\"resetObject\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"selfDestruct\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"storageData\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50610b1f806100206000396000f3fe608060405234801561001057600080fd5b50600436106100885760003560e01c8063b0d50e381161005b578063b0d50e38146100ff578063c522de441461012f578063d58010651461015f578063f529d4481461017b57610088565b806327e235e31461008d5780637a5ae62e146100bd5780639cb8a26a146100d9578063a2601e0a146100e3575b600080fd5b6100a760048036038101906100a29190610462565b610197565b6040516100b491906104a8565b60405180910390f35b6100d760048036038101906100d291906104f9565b6101af565b005b6100e16101d1565b005b6100fd60048036038101906100f8919061066c565b610242565b005b61011960048036038101906101149190610462565b610267565b60405161012691906106e3565b60405180910390f35b610149600480360381019061014491906104f9565b610287565b604051610156919061077d565b60405180910390f35b6101796004803603810190610174919061066c565b610327565b005b610195600480360381019061019091906107cb565b61034c565b005b60006020528060005260406000206000915090505481565b6001600082815260200190815260200160002060006101ce9190610393565b50565b6001600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff0219169083151502179055503373ffffffffffffffffffffffffffffffffffffffff16ff5b806001600084815260200190815260200160002090816102629190610a17565b505050565b60026020528060005260406000206000915054906101000a900460ff1681565b600160205280600052604060002060009150905080546102a69061083a565b80601f01602080910402602001604051908101604052809291908181526020018280546102d29061083a565b801561031f5780601f106102f45761010080835404028352916020019161031f565b820191906000526020600020905b81548152906001019060200180831161030257829003601f168201915b505050505081565b806001600084815260200190815260200160002090816103479190610a17565b505050565b806000808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055505050565b50805461039f9061083a565b6000825580601f106103b157506103d0565b601f0160209004906000526020600020908101906103cf91906103d3565b5b50565b5b808211156103ec5760008160009055506001016103d4565b5090565b6000604051905090565b600080fd5b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b600061042f82610404565b9050919050565b61043f81610424565b811461044a57600080fd5b50565b60008135905061045c81610436565b92915050565b600060208284031215610478576104776103fa565b5b60006104868482850161044d565b91505092915050565b6000819050919050565b6104a28161048f565b82525050565b60006020820190506104bd6000830184610499565b92915050565b6000819050919050565b6104d6816104c3565b81146104e157600080fd5b50565b6000813590506104f3816104cd565b92915050565b60006020828403121561050f5761050e6103fa565b5b600061051d848285016104e4565b91505092915050565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b61057982610530565b810181811067ffffffffffffffff8211171561059857610597610541565b5b80604052505050565b60006105ab6103f0565b90506105b78282610570565b919050565b600067ffffffffffffffff8211156105d7576105d6610541565b5b6105e082610530565b9050602081019050919050565b82818337600083830152505050565b600061060f61060a846105bc565b6105a1565b90508281526020810184848401111561062b5761062a61052b565b5b6106368482856105ed565b509392505050565b600082601f83011261065357610652610526565b5b81356106638482602086016105fc565b91505092915050565b60008060408385031215610683576106826103fa565b5b6000610691858286016104e4565b925050602083013567ffffffffffffffff8111156106b2576106b16103ff565b5b6106be8582860161063e565b9150509250929050565b60008115159050
919050565b6106dd816106c8565b82525050565b60006020820190506106f860008301846106d4565b92915050565b600081519050919050565b600082825260208201905092915050565b60005b8381101561073857808201518184015260208101905061071d565b60008484015250505050565b600061074f826106fe565b6107598185610709565b935061076981856020860161071a565b61077281610530565b840191505092915050565b600060208201905081810360008301526107978184610744565b905092915050565b6107a88161048f565b81146107b357600080fd5b50565b6000813590506107c58161079f565b92915050565b600080604083850312156107e2576107e16103fa565b5b60006107f08582860161044d565b9250506020610801858286016107b6565b9150509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b6000600282049050600182168061085257607f821691505b6020821081036108655761086461080b565b5b50919050565b60008190508160005260206000209050919050565b60006020601f8301049050919050565b600082821b905092915050565b6000600883026108cd7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82610890565b6108d78683610890565b95508019841693508086168417925050509392505050565b6000819050919050565b600061091461090f61090a8461048f565b6108ef565b61048f565b9050919050565b6000819050919050565b61092e836108f9565b61094261093a8261091b565b84845461089d565b825550505050565b600090565b61095761094a565b610962818484610925565b505050565b5b818110156109865761097b60008261094f565b600181019050610968565b5050565b601f8211156109cb5761099c8161086b565b6109a584610880565b810160208510156109b4578190505b6109c86109c085610880565b830182610967565b50505b505050565b600082821c905092915050565b60006109ee600019846008026109d0565b1980831691505092915050565b6000610a0783836109dd565b9150826002028217905092915050565b610a20826106fe565b67ffffffffffffffff811115610a3957610a38610541565b5b610a43825461083a565b610a4e82828561098a565b600060209050601f831160018114610a815760008415610a6f578287015190505b610a7985826109fb565b865550610ae1565b601f198416610a8f8661086b565b60005b82811015610ab757848901518255600182019150602085019450602081019050610a92565b86831015610ad45784890151610ad0601f8916826109dd565b8355505b6001600288020188555050505b50505050505056fea26469706673582212202f3e2761204e887bab7c8f092e2346bad94e865f80979db9a6915f9d2bdbc03c64736f6c63430008130033", +} + +// StatefuzztestABI is the input ABI used to generate the binding from. +// Deprecated: Use StatefuzztestMetaData.ABI instead. +var StatefuzztestABI = StatefuzztestMetaData.ABI + +// StatefuzztestBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use StatefuzztestMetaData.Bin instead. +var StatefuzztestBin = StatefuzztestMetaData.Bin + +// DeployStatefuzztest deploys a new Ethereum contract, binding an instance of Statefuzztest to it. +func DeployStatefuzztest(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Statefuzztest, error) { + parsed, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(StatefuzztestBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Statefuzztest{StatefuzztestCaller: StatefuzztestCaller{contract: contract}, StatefuzztestTransactor: StatefuzztestTransactor{contract: contract}, StatefuzztestFilterer: StatefuzztestFilterer{contract: contract}}, nil +} + +// Statefuzztest is an auto generated Go binding around an Ethereum contract. 
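
For reference, a minimal sketch of how the TestBundles control chain above drives this generated binding; it only uses calls that appear in the test itself (NewKeyedTransactorWithChainID, DeployStatefuzztest, CreateObject, Commit, TransactionReceipt), and the wrapper function, key, and value literals are hypothetical additions, not part of the generated file:

package miner // sketch only; the generated bindings and test live in this package

import (
	"context"
	"crypto/ecdsa"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core/types"
)

// deployAndCreateObject deploys the fuzz test contract on a simulated backend,
// stores one object, and returns the mined receipt for comparison.
func deployAndCreateObject(pk *ecdsa.PrivateKey, backend *backends.SimulatedBackend, chainID *big.Int) (*types.Receipt, error) {
	auth, err := bind.NewKeyedTransactorWithChainID(pk, chainID)
	if err != nil {
		return nil, err
	}
	_, _, fuzz, err := DeployStatefuzztest(auth, backend)
	if err != nil {
		return nil, err
	}
	backend.Commit() // mine the deployment

	key := [32]byte{0x01} // hypothetical object key
	tx, err := fuzz.CreateObject(auth, key, []byte{0xaa})
	if err != nil {
		return nil, err
	}
	backend.Commit() // mine the createObject transaction
	return backend.TransactionReceipt(context.Background(), tx.Hash())
}
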
+type Statefuzztest struct { + StatefuzztestCaller // Read-only binding to the contract + StatefuzztestTransactor // Write-only binding to the contract + StatefuzztestFilterer // Log filterer for contract events +} + +// StatefuzztestCaller is an auto generated read-only Go binding around an Ethereum contract. +type StatefuzztestCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// StatefuzztestTransactor is an auto generated write-only Go binding around an Ethereum contract. +type StatefuzztestTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// StatefuzztestFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type StatefuzztestFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// StatefuzztestSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type StatefuzztestSession struct { + Contract *Statefuzztest // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// StatefuzztestCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type StatefuzztestCallerSession struct { + Contract *StatefuzztestCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// StatefuzztestTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type StatefuzztestTransactorSession struct { + Contract *StatefuzztestTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// StatefuzztestRaw is an auto generated low-level Go binding around an Ethereum contract. +type StatefuzztestRaw struct { + Contract *Statefuzztest // Generic contract binding to access the raw methods on +} + +// StatefuzztestCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type StatefuzztestCallerRaw struct { + Contract *StatefuzztestCaller // Generic read-only contract binding to access the raw methods on +} + +// StatefuzztestTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type StatefuzztestTransactorRaw struct { + Contract *StatefuzztestTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewStatefuzztest creates a new instance of Statefuzztest, bound to a specific deployed contract. +func NewStatefuzztest(address common.Address, backend bind.ContractBackend) (*Statefuzztest, error) { + contract, err := bindStatefuzztest(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Statefuzztest{StatefuzztestCaller: StatefuzztestCaller{contract: contract}, StatefuzztestTransactor: StatefuzztestTransactor{contract: contract}, StatefuzztestFilterer: StatefuzztestFilterer{contract: contract}}, nil +} + +// NewStatefuzztestCaller creates a new read-only instance of Statefuzztest, bound to a specific deployed contract. 
+func NewStatefuzztestCaller(address common.Address, caller bind.ContractCaller) (*StatefuzztestCaller, error) { + contract, err := bindStatefuzztest(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &StatefuzztestCaller{contract: contract}, nil +} + +// NewStatefuzztestTransactor creates a new write-only instance of Statefuzztest, bound to a specific deployed contract. +func NewStatefuzztestTransactor(address common.Address, transactor bind.ContractTransactor) (*StatefuzztestTransactor, error) { + contract, err := bindStatefuzztest(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &StatefuzztestTransactor{contract: contract}, nil +} + +// NewStatefuzztestFilterer creates a new log filterer instance of Statefuzztest, bound to a specific deployed contract. +func NewStatefuzztestFilterer(address common.Address, filterer bind.ContractFilterer) (*StatefuzztestFilterer, error) { + contract, err := bindStatefuzztest(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &StatefuzztestFilterer{contract: contract}, nil +} + +// bindStatefuzztest binds a generic wrapper to an already deployed contract. +func bindStatefuzztest(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Statefuzztest *StatefuzztestRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Statefuzztest.Contract.StatefuzztestCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Statefuzztest *StatefuzztestRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Statefuzztest.Contract.StatefuzztestTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Statefuzztest *StatefuzztestRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Statefuzztest.Contract.StatefuzztestTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Statefuzztest *StatefuzztestCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Statefuzztest.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Statefuzztest *StatefuzztestTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Statefuzztest.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. 
+func (_Statefuzztest *StatefuzztestTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Statefuzztest.Contract.contract.Transact(opts, method, params...) +} + +// Balances is a free data retrieval call binding the contract method 0x27e235e3. +// +// Solidity: function balances(address ) view returns(uint256) +func (_Statefuzztest *StatefuzztestCaller) Balances(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) { + var out []interface{} + err := _Statefuzztest.contract.Call(opts, &out, "balances", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// Balances is a free data retrieval call binding the contract method 0x27e235e3. +// +// Solidity: function balances(address ) view returns(uint256) +func (_Statefuzztest *StatefuzztestSession) Balances(arg0 common.Address) (*big.Int, error) { + return _Statefuzztest.Contract.Balances(&_Statefuzztest.CallOpts, arg0) +} + +// Balances is a free data retrieval call binding the contract method 0x27e235e3. +// +// Solidity: function balances(address ) view returns(uint256) +func (_Statefuzztest *StatefuzztestCallerSession) Balances(arg0 common.Address) (*big.Int, error) { + return _Statefuzztest.Contract.Balances(&_Statefuzztest.CallOpts, arg0) +} + +// IsSelfDestructed is a free data retrieval call binding the contract method 0xb0d50e38. +// +// Solidity: function isSelfDestructed(address ) view returns(bool) +func (_Statefuzztest *StatefuzztestCaller) IsSelfDestructed(opts *bind.CallOpts, arg0 common.Address) (bool, error) { + var out []interface{} + err := _Statefuzztest.contract.Call(opts, &out, "isSelfDestructed", arg0) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +// IsSelfDestructed is a free data retrieval call binding the contract method 0xb0d50e38. +// +// Solidity: function isSelfDestructed(address ) view returns(bool) +func (_Statefuzztest *StatefuzztestSession) IsSelfDestructed(arg0 common.Address) (bool, error) { + return _Statefuzztest.Contract.IsSelfDestructed(&_Statefuzztest.CallOpts, arg0) +} + +// IsSelfDestructed is a free data retrieval call binding the contract method 0xb0d50e38. +// +// Solidity: function isSelfDestructed(address ) view returns(bool) +func (_Statefuzztest *StatefuzztestCallerSession) IsSelfDestructed(arg0 common.Address) (bool, error) { + return _Statefuzztest.Contract.IsSelfDestructed(&_Statefuzztest.CallOpts, arg0) +} + +// StorageData is a free data retrieval call binding the contract method 0xc522de44. +// +// Solidity: function storageData(bytes32 ) view returns(bytes) +func (_Statefuzztest *StatefuzztestCaller) StorageData(opts *bind.CallOpts, arg0 [32]byte) ([]byte, error) { + var out []interface{} + err := _Statefuzztest.contract.Call(opts, &out, "storageData", arg0) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +// StorageData is a free data retrieval call binding the contract method 0xc522de44. +// +// Solidity: function storageData(bytes32 ) view returns(bytes) +func (_Statefuzztest *StatefuzztestSession) StorageData(arg0 [32]byte) ([]byte, error) { + return _Statefuzztest.Contract.StorageData(&_Statefuzztest.CallOpts, arg0) +} + +// StorageData is a free data retrieval call binding the contract method 0xc522de44. 
+// +// Solidity: function storageData(bytes32 ) view returns(bytes) +func (_Statefuzztest *StatefuzztestCallerSession) StorageData(arg0 [32]byte) ([]byte, error) { + return _Statefuzztest.Contract.StorageData(&_Statefuzztest.CallOpts, arg0) +} + +// ChangeBalance is a paid mutator transaction binding the contract method 0xf529d448. +// +// Solidity: function changeBalance(address account, uint256 newBalance) returns() +func (_Statefuzztest *StatefuzztestTransactor) ChangeBalance(opts *bind.TransactOpts, account common.Address, newBalance *big.Int) (*types.Transaction, error) { + return _Statefuzztest.contract.Transact(opts, "changeBalance", account, newBalance) +} + +// ChangeBalance is a paid mutator transaction binding the contract method 0xf529d448. +// +// Solidity: function changeBalance(address account, uint256 newBalance) returns() +func (_Statefuzztest *StatefuzztestSession) ChangeBalance(account common.Address, newBalance *big.Int) (*types.Transaction, error) { + return _Statefuzztest.Contract.ChangeBalance(&_Statefuzztest.TransactOpts, account, newBalance) +} + +// ChangeBalance is a paid mutator transaction binding the contract method 0xf529d448. +// +// Solidity: function changeBalance(address account, uint256 newBalance) returns() +func (_Statefuzztest *StatefuzztestTransactorSession) ChangeBalance(account common.Address, newBalance *big.Int) (*types.Transaction, error) { + return _Statefuzztest.Contract.ChangeBalance(&_Statefuzztest.TransactOpts, account, newBalance) +} + +// ChangeStorage is a paid mutator transaction binding the contract method 0xa2601e0a. +// +// Solidity: function changeStorage(bytes32 key, bytes newValue) returns() +func (_Statefuzztest *StatefuzztestTransactor) ChangeStorage(opts *bind.TransactOpts, key [32]byte, newValue []byte) (*types.Transaction, error) { + return _Statefuzztest.contract.Transact(opts, "changeStorage", key, newValue) +} + +// ChangeStorage is a paid mutator transaction binding the contract method 0xa2601e0a. +// +// Solidity: function changeStorage(bytes32 key, bytes newValue) returns() +func (_Statefuzztest *StatefuzztestSession) ChangeStorage(key [32]byte, newValue []byte) (*types.Transaction, error) { + return _Statefuzztest.Contract.ChangeStorage(&_Statefuzztest.TransactOpts, key, newValue) +} + +// ChangeStorage is a paid mutator transaction binding the contract method 0xa2601e0a. +// +// Solidity: function changeStorage(bytes32 key, bytes newValue) returns() +func (_Statefuzztest *StatefuzztestTransactorSession) ChangeStorage(key [32]byte, newValue []byte) (*types.Transaction, error) { + return _Statefuzztest.Contract.ChangeStorage(&_Statefuzztest.TransactOpts, key, newValue) +} + +// CreateObject is a paid mutator transaction binding the contract method 0xd5801065. +// +// Solidity: function createObject(bytes32 key, bytes value) returns() +func (_Statefuzztest *StatefuzztestTransactor) CreateObject(opts *bind.TransactOpts, key [32]byte, value []byte) (*types.Transaction, error) { + return _Statefuzztest.contract.Transact(opts, "createObject", key, value) +} + +// CreateObject is a paid mutator transaction binding the contract method 0xd5801065. +// +// Solidity: function createObject(bytes32 key, bytes value) returns() +func (_Statefuzztest *StatefuzztestSession) CreateObject(key [32]byte, value []byte) (*types.Transaction, error) { + return _Statefuzztest.Contract.CreateObject(&_Statefuzztest.TransactOpts, key, value) +} + +// CreateObject is a paid mutator transaction binding the contract method 0xd5801065. 
+// +// Solidity: function createObject(bytes32 key, bytes value) returns() +func (_Statefuzztest *StatefuzztestTransactorSession) CreateObject(key [32]byte, value []byte) (*types.Transaction, error) { + return _Statefuzztest.Contract.CreateObject(&_Statefuzztest.TransactOpts, key, value) +} + +// ResetObject is a paid mutator transaction binding the contract method 0x7a5ae62e. +// +// Solidity: function resetObject(bytes32 key) returns() +func (_Statefuzztest *StatefuzztestTransactor) ResetObject(opts *bind.TransactOpts, key [32]byte) (*types.Transaction, error) { + return _Statefuzztest.contract.Transact(opts, "resetObject", key) +} + +// ResetObject is a paid mutator transaction binding the contract method 0x7a5ae62e. +// +// Solidity: function resetObject(bytes32 key) returns() +func (_Statefuzztest *StatefuzztestSession) ResetObject(key [32]byte) (*types.Transaction, error) { + return _Statefuzztest.Contract.ResetObject(&_Statefuzztest.TransactOpts, key) +} + +// ResetObject is a paid mutator transaction binding the contract method 0x7a5ae62e. +// +// Solidity: function resetObject(bytes32 key) returns() +func (_Statefuzztest *StatefuzztestTransactorSession) ResetObject(key [32]byte) (*types.Transaction, error) { + return _Statefuzztest.Contract.ResetObject(&_Statefuzztest.TransactOpts, key) +} + +// SelfDestruct is a paid mutator transaction binding the contract method 0x9cb8a26a. +// +// Solidity: function selfDestruct() returns() +func (_Statefuzztest *StatefuzztestTransactor) SelfDestruct(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Statefuzztest.contract.Transact(opts, "selfDestruct") +} + +// SelfDestruct is a paid mutator transaction binding the contract method 0x9cb8a26a. +// +// Solidity: function selfDestruct() returns() +func (_Statefuzztest *StatefuzztestSession) SelfDestruct() (*types.Transaction, error) { + return _Statefuzztest.Contract.SelfDestruct(&_Statefuzztest.TransactOpts) +} + +// SelfDestruct is a paid mutator transaction binding the contract method 0x9cb8a26a. 
+// +// Solidity: function selfDestruct() returns() +func (_Statefuzztest *StatefuzztestTransactorSession) SelfDestruct() (*types.Transaction, error) { + return _Statefuzztest.Contract.SelfDestruct(&_Statefuzztest.TransactOpts) +} diff --git a/miner/testdata/state_fuzz_test.abi b/miner/testdata/state_fuzz_test.abi new file mode 100644 index 0000000000..fc0178be7b --- /dev/null +++ b/miner/testdata/state_fuzz_test.abi @@ -0,0 +1 @@ +[{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"balances","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"},{"internalType":"uint256","name":"newBalance","type":"uint256"}],"name":"changeBalance","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"key","type":"bytes32"},{"internalType":"bytes","name":"newValue","type":"bytes"}],"name":"changeStorage","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"key","type":"bytes32"},{"internalType":"bytes","name":"value","type":"bytes"}],"name":"createObject","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"isSelfDestructed","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"key","type":"bytes32"}],"name":"resetObject","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"selfDestruct","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"name":"storageData","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"}] \ No newline at end of file diff --git a/miner/verify_bundles_test.go b/miner/verify_bundles_test.go index c4d12e39d9..0b225faeea 100644 --- a/miner/verify_bundles_test.go +++ b/miner/verify_bundles_test.go @@ -430,7 +430,7 @@ func TestVerifyBundlesAtomicity(t *testing.T) { } func TestExtractBundleDataFromUsedBundles(t *testing.T) { - _, _, signers := genTestSetup() + _, _, signers := genTestSetup(GasLimit) tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) @@ -478,7 +478,7 @@ func TestExtractBundleDataFromUsedBundles(t *testing.T) { } func TestExtractIncludedTxDataFromEnv(t *testing.T) { - _, _, signers := genTestSetup() + _, _, signers := genTestSetup(GasLimit) tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) @@ -536,7 +536,7 @@ func TestExtractPrivateTxData(t *testing.T) { } func BenchmarkVerifyBundlesAtomicity(b *testing.B) { - _, _, signers := genTestSetup() + _, _, signers := genTestSetup(GasLimit) var ( env = &environment{} From 0259e064b27484844128a9aef2c1eca66ed1ce21 Mon Sep 17 00:00:00 2001 From: Vitaly Drogan Date: Mon, 21 Aug 2023 22:22:59 +0300 Subject: [PATCH 36/46] fix a bug with state reverts of accounts that are not touched according to the journal (#102) --- core/state/multi_tx_snapshot.go | 25 +++++++++++++++++++++++++ core/state/multi_tx_snapshot_test.go | 19 
+++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/core/state/multi_tx_snapshot.go b/core/state/multi_tx_snapshot.go index b66a9ee764..78c4ad92e0 100644 --- a/core/state/multi_tx_snapshot.go +++ b/core/state/multi_tx_snapshot.go @@ -29,6 +29,10 @@ type MultiTxSnapshot struct { accountNotPending map[common.Address]struct{} accountNotDirty map[common.Address]struct{} + // touched accounts are accounts that can be affected when snapshot is reverted + // we clear dirty storage for touched accounts when snapshot is reverted + touchedAccounts map[common.Address]struct{} + // TODO: snapdestructs, snapaccount storage } @@ -51,6 +55,7 @@ func newMultiTxSnapshot() MultiTxSnapshot { accountDeleted: make(map[common.Address]bool), accountNotPending: make(map[common.Address]struct{}), accountNotDirty: make(map[common.Address]struct{}), + touchedAccounts: make(map[common.Address]struct{}), } } @@ -196,6 +201,7 @@ func (s *MultiTxSnapshot) objectChanged(address common.Address) bool { // updateBalanceChange updates the snapshot with the balance change. func (s *MultiTxSnapshot) updateBalanceChange(change balanceChange) { + s.touchedAccounts[*change.account] = struct{}{} if s.objectChanged(*change.account) { return } @@ -206,6 +212,7 @@ func (s *MultiTxSnapshot) updateBalanceChange(change balanceChange) { // updateNonceChange updates the snapshot with the nonce change. func (s *MultiTxSnapshot) updateNonceChange(change nonceChange) { + s.touchedAccounts[*change.account] = struct{}{} if s.objectChanged(*change.account) { return } @@ -216,6 +223,7 @@ func (s *MultiTxSnapshot) updateNonceChange(change nonceChange) { // updateCodeChange updates the snapshot with the code change. func (s *MultiTxSnapshot) updateCodeChange(change codeChange) { + s.touchedAccounts[*change.account] = struct{}{} if s.objectChanged(*change.account) { return } @@ -227,6 +235,7 @@ func (s *MultiTxSnapshot) updateCodeChange(change codeChange) { // updateResetObjectChange updates the snapshot with the reset object change. func (s *MultiTxSnapshot) updateResetObjectChange(change resetObjectChange) { + s.touchedAccounts[change.prev.address] = struct{}{} address := change.prev.address if _, ok := s.prevObjects[address]; !ok { s.prevObjects[address] = change.prev @@ -235,6 +244,7 @@ func (s *MultiTxSnapshot) updateResetObjectChange(change resetObjectChange) { // updateCreateObjectChange updates the snapshot with the createObjectChange. func (s *MultiTxSnapshot) updateCreateObjectChange(change createObjectChange) { + s.touchedAccounts[*change.account] = struct{}{} if _, ok := s.prevObjects[*change.account]; !ok { s.prevObjects[*change.account] = nil } @@ -242,6 +252,7 @@ func (s *MultiTxSnapshot) updateCreateObjectChange(change createObjectChange) { // updateSuicideChange updates the snapshot with the suicide change. func (s *MultiTxSnapshot) updateSuicideChange(change suicideChange) { + s.touchedAccounts[*change.account] = struct{}{} if s.objectChanged(*change.account) { return } @@ -255,6 +266,7 @@ func (s *MultiTxSnapshot) updateSuicideChange(change suicideChange) { // updatePendingStorage updates the snapshot with the pending storage change. func (s *MultiTxSnapshot) updatePendingStorage(address common.Address, key, value common.Hash, ok bool) { + s.touchedAccounts[address] = struct{}{} if s.objectChanged(address) { return } @@ -273,6 +285,7 @@ func (s *MultiTxSnapshot) updatePendingStorage(address common.Address, key, valu // updatePendingStatus updates the snapshot with previous pending status. 
func (s *MultiTxSnapshot) updatePendingStatus(address common.Address, pending, dirty bool) { + s.touchedAccounts[address] = struct{}{} if !pending { s.accountNotPending[address] = struct{}{} } @@ -283,6 +296,7 @@ func (s *MultiTxSnapshot) updatePendingStatus(address common.Address, pending, d // updateObjectDeleted updates the snapshot with the object deletion. func (s *MultiTxSnapshot) updateObjectDeleted(address common.Address, deleted bool) { + s.touchedAccounts[address] = struct{}{} if s.objectChanged(address) { return } @@ -412,6 +426,10 @@ func (s *MultiTxSnapshot) Merge(other *MultiTxSnapshot) error { } } + for address := range other.touchedAccounts { + s.touchedAccounts[address] = struct{}{} + } + return nil } @@ -483,6 +501,13 @@ func (s *MultiTxSnapshot) revertState(st *StateDB) { for address := range s.accountNotDirty { delete(st.stateObjectsDirty, address) } + + // clean dirty state of touched accounts + for address := range s.touchedAccounts { + if obj, ok := st.stateObjects[address]; ok { + obj.dirtyStorage = make(Storage) + } + } } // MultiTxSnapshotStack contains a list of snapshots for multiple transactions associated with a StateDB. diff --git a/core/state/multi_tx_snapshot_test.go b/core/state/multi_tx_snapshot_test.go index 77c4168334..6b6b1d56a5 100644 --- a/core/state/multi_tx_snapshot_test.go +++ b/core/state/multi_tx_snapshot_test.go @@ -346,6 +346,25 @@ func TestMultiTxSnapshotAccountChangesSimple(t *testing.T) { }) } +// This test verifies that dirty account storage is properly cleaned for accounts after revert +func TestMultiTxSnapshotAccountChangesRevertedByJournal(t *testing.T) { + testMultiTxSnapshot(t, func(s *StateDB) { + for _, addr := range addrs { + s.SetState(addr, common.HexToHash("0x01"), common.HexToHash("0x03")) + } + s.Finalise(true) + for _, addr := range addrs { + // we use normal snapshot here because it + // 1. does not mark an account dirty (even though we applied changes) + // 2. 
changes dirty, uncommitted state of the account + snap := s.Snapshot() + s.SetState(addr, common.HexToHash("0x01"), common.HexToHash("0x02")) + s.RevertToSnapshot(snap) + } + s.Finalise(true) + }) +} + func TestMultiTxSnapshotRefund(t *testing.T) { testMultiTxSnapshot(t, func(s *StateDB) { for _, addr := range addrs { From 3045b14eac8487e9e26064bd25fe497221da4b57 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Mon, 21 Aug 2023 17:36:47 -0500 Subject: [PATCH 37/46] Lint --- miner/algo_common.go | 1 + miner/algo_state_test.go | 196 +++++++++++++++++++++++++------------- miner/env_changes.go | 5 + miner/environment_diff.go | 2 +- 4 files changed, 138 insertions(+), 66 deletions(-) diff --git a/miner/algo_common.go b/miner/algo_common.go index e0fdf5dfa4..aea5602f94 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -45,6 +45,7 @@ var ( var emptyCodeHash = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") var ( + ErrMevGasPriceNotSet = errors.New("mev gas price not set") errInterrupt = errors.New("miner worker interrupted") errNoAlgorithmConfig = errors.New("no algorithm configuration specified") errNoPrivateKey = errors.New("no private key provided") diff --git a/miner/algo_state_test.go b/miner/algo_state_test.go index f999f9c8fa..83dc5b9cc0 100644 --- a/miner/algo_state_test.go +++ b/miner/algo_state_test.go @@ -5,8 +5,11 @@ import ( "context" "crypto/rand" "encoding/hex" - "errors" "fmt" + "math/big" + mathrand "math/rand" + "testing" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" @@ -17,9 +20,6 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/require" - "math/big" - mathrand "math/rand" - "testing" ) // NOTE(wazzymandias): Below is a FuzzTest contract written in Solidity and shown here as reference code @@ -194,6 +194,56 @@ type stateComparisonTestContext struct { type stateComparisonTestContexts []stateComparisonTestContext +func (sc stateComparisonTestContexts) Init(t *testing.T, gasLimit uint64) stateComparisonTestContexts { + for i := range sc { + tc := stateComparisonTestContext{} + tc.statedb, tc.chainData, tc.signers = genTestSetup(gasLimit) + tc.env = newEnvironment(tc.chainData, tc.statedb, tc.signers.addresses[0], gasLimit, big.NewInt(1)) + var err error + switch i { + case Baseline: + tc.Name = "baseline" + tc.envDiff = newEnvironmentDiff(tc.env) + case SingleSnapshot: + tc.Name = "single-snapshot" + tc.changes, err = newEnvChanges(tc.env) + _ = tc.changes.env.state.MultiTxSnapshotCommit() + case MultiSnapshot: + tc.Name = "multi-snapshot" + tc.changes, err = newEnvChanges(tc.env) + _ = tc.changes.env.state.MultiTxSnapshotCommit() + } + + require.NoError(t, err, "failed to initialize test contexts: %v", err) + sc[i] = tc + } + return sc +} + +func (sc stateComparisonTestContexts) ApplyChanges(t *testing.T) { + for _, tc := range sc { + if tc.envDiff != nil { + tc.envDiff.applyToBaseEnv() + } + if tc.changes != nil { + require.NoError(t, tc.changes.apply()) + } + } +} + +func (sc stateComparisonTestContexts) SimulateBundle(testCtxIdx int, b types.MevBundle) (types.SimulatedBundle, error) { + tc := sc[testCtxIdx] + var env *environment + switch testCtxIdx { + case Baseline: + env = tc.envDiff.baseEnvironment + case SingleSnapshot, MultiSnapshot: + env = tc.changes.env + } + + return simulateBundle(env, b, tc.chainData, nil) +} + func (sc 
stateComparisonTestContexts) ValidateRootHashes(t *testing.T, expected common.Hash) { for _, tc := range sc { require.Equal(t, expected.Bytes(), tc.rootHash.Bytes(), @@ -321,32 +371,6 @@ func (sc stateComparisonTestContexts) ValidateTestCases(t *testing.T, reference } } -func (sc stateComparisonTestContexts) Init(t *testing.T, gasLimit uint64) stateComparisonTestContexts { - for i := range sc { - tc := stateComparisonTestContext{} - tc.statedb, tc.chainData, tc.signers = genTestSetup(gasLimit) - tc.env = newEnvironment(tc.chainData, tc.statedb, tc.signers.addresses[0], gasLimit, big.NewInt(1)) - var err error - switch i { - case Baseline: - tc.Name = "baseline" - tc.envDiff = newEnvironmentDiff(tc.env) - case SingleSnapshot: - tc.Name = "single-snapshot" - tc.changes, err = newEnvChanges(tc.env) - _ = tc.changes.env.state.MultiTxSnapshotCommit() - case MultiSnapshot: - tc.Name = "multi-snapshot" - tc.changes, err = newEnvChanges(tc.env) - _ = tc.changes.env.state.MultiTxSnapshotCommit() - } - - require.NoError(t, err, "failed to initialize test contexts: %v", err) - sc[i] = tc - } - return sc -} - func TestStateComparisons(t *testing.T) { var testContexts = make(stateComparisonTestContexts, 3) @@ -576,7 +600,7 @@ func TestBundles(t *testing.T) { _, err = tc.envDiff.baseEnvironment.state.Commit(true) case SingleSnapshot: - err = tc.env.state.NewMultiTxSnapshot() + err = tc.changes.env.state.NewMultiTxSnapshot() require.NoError(t, err) receipt, _, err = tc.changes.commitTx(signTx, tc.chainData) @@ -588,7 +612,7 @@ func TestBundles(t *testing.T) { _, err = tc.changes.env.state.Commit(true) case MultiSnapshot: - err = tc.env.state.NewMultiTxSnapshot() + err = tc.changes.env.state.NewMultiTxSnapshot() require.NoError(t, err) receipt, _, err = tc.changes.commitTx(signTx, tc.chainData) @@ -680,7 +704,7 @@ func TestBundles(t *testing.T) { actualReceipt, _, err = tc.envDiff.commitTx(actualTx, tc.chainData) tc.envDiff.applyToBaseEnv() case SingleSnapshot: - err = tc.env.state.NewMultiTxSnapshot() + err = tc.changes.env.state.NewMultiTxSnapshot() require.NoError(t, err) actualReceipt, _, err = tc.changes.commitTx(actualTx, tc.chainData) @@ -688,10 +712,10 @@ func TestBundles(t *testing.T) { err = tc.changes.apply() case MultiSnapshot: - err = tc.env.state.NewMultiTxSnapshot() + err = tc.changes.env.state.NewMultiTxSnapshot() require.NoError(t, err) - err = tc.env.state.NewMultiTxSnapshot() + err = tc.changes.env.state.NewMultiTxSnapshot() require.NoError(t, err) actualReceipt, _, err = tc.changes.commitTx(actualTx, tc.chainData) @@ -700,7 +724,7 @@ func TestBundles(t *testing.T) { err = tc.changes.apply() require.NoError(t, err) - err = tc.env.state.MultiTxSnapshotCommit() + err = tc.changes.env.state.MultiTxSnapshotCommit() } require.NoError(t, err) @@ -732,7 +756,7 @@ func TestBundles(t *testing.T) { ChangeStorage ) const ( - bundleCount = 5 + bundleCount = 3 bundleSize = 10 ) @@ -799,56 +823,98 @@ func TestBundles(t *testing.T) { txData, err = changeStorageFuzzTestContract(chainID, nonce, fuzzContractAddress, changeStorageObjectKey, value[:]) } + require.NotNilf(t, txData, "txData is nil for bundle %d, tx %d", bundleIdx, txIdx) require.NoError(t, err) tx := types.MustSignNewTx(s.signers[toAddressRandomIdx], types.LatestSigner(s.config), txData) transactions[txIdx] = tx - s.nonces[toAddressRandomIdx]++ + + // update nonce for all test contexts + base := testContexts[Baseline] + single := testContexts[SingleSnapshot] + multi := testContexts[MultiSnapshot] + + 
base.signers.nonces[toAddressRandomIdx]++ + single.signers.nonces[toAddressRandomIdx]++ + multi.signers.nonces[toAddressRandomIdx]++ } bundles[bundleIdx] = types.MevBundle{ Txs: transactions[:], } } + + // prepare for bundle application + + // initialize new snapshot(s) for tcIdx, tc := range testContexts { - algoConf := defaultAlgorithmConfig - algoConf.EnforceProfit = true switch tcIdx { case SingleSnapshot, MultiSnapshot: - err = tc.env.state.NewMultiTxSnapshot() + err = tc.changes.env.state.NewMultiTxSnapshot() require.NoError(t, err) } + } - for _, b := range bundles { - sim, err := simulateBundle(tc.env, b, tc.chainData, nil) - - switch tcIdx { - case Baseline: - err = tc.envDiff.commitBundle(&sim, tc.chainData, nil, algoConf) - case SingleSnapshot: - err = tc.changes.commitBundle(&sim, tc.chainData, algoConf) - case MultiSnapshot: - err = tc.changes.commitBundle(&sim, tc.chainData, algoConf) - } - var pe *lowProfitError - if errors.As(err, &pe) || (err != nil && err.Error() == "bundle mev gas price is nil") { - continue + bundleMap := map[int][]types.SimulatedBundle{ + Baseline: make([]types.SimulatedBundle, 0), + SingleSnapshot: make([]types.SimulatedBundle, 0), + MultiSnapshot: make([]types.SimulatedBundle, 0), + } + var ( + simulationErrMap = map[int]error{ + Baseline: nil, + SingleSnapshot: nil, + MultiSnapshot: nil, + } + commitErrMap = map[int]error{ + Baseline: nil, + SingleSnapshot: nil, + MultiSnapshot: nil, + } + ) + // commit bundles one by one to each test context to make sure each bundle result is deterministic + // apply all to the underlying environment at the end + for bundleIdx, b := range bundles { + // first compare simulation results + for tcIdx, tc := range testContexts { + sim, simErr := testContexts.SimulateBundle(tcIdx, b) + t.Logf("bundle simulation error [bundle index %d] [%s]: %v", bundleIdx, tc.Name, simErr) + + simulationErrMap[tcIdx] = simErr + bundleMap[tcIdx] = append(bundleMap[tcIdx], sim) + + if simulationErrMap[Baseline] != nil { + require.NotNilf(t, simulationErrMap[tcIdx], "simulation error is nil for test context %s", tc.Name) + require.Equal(t, simulationErrMap[Baseline].Error(), simulationErrMap[tcIdx].Error(), + "simulation error mismatch for test context %s", tc.Name) } else { - require.NoError(t, err, "test %s", tc.Name) + require.NoError(t, simulationErrMap[tcIdx], "simulation error for test context %s", tc.Name) } } - switch tcIdx { - case Baseline: - tc.envDiff.applyToBaseEnv() - case SingleSnapshot: - err = tc.changes.apply() - require.NoError(t, err) - case MultiSnapshot: - err = tc.changes.apply() - require.NoError(t, err) + algoConf := defaultAlgorithmConfig + algoConf.EnforceProfit = true + for tcIdx, tc := range testContexts { + var commitErr error + sim := bundleMap[tcIdx][bundleIdx] + + switch tcIdx { + case Baseline: + commitErr = tc.envDiff.commitBundle(&sim, tc.chainData, nil, algoConf) + case SingleSnapshot, MultiSnapshot: + commitErr = tc.changes.commitBundle(&sim, tc.chainData, algoConf) + + if commitErrMap[Baseline] != nil { + require.NoError(t, tc.changes.env.state.MultiTxSnapshotRevert()) + } else { + require.NoError(t, tc.changes.env.state.MultiTxSnapshotCommit()) + } + require.NoError(t, tc.changes.env.state.NewMultiTxSnapshot()) + } + commitErrMap[tcIdx] = commitErr } } + testContexts.ApplyChanges(t) testContexts.UpdateRootHashes(t) testContexts.ValidateTestCases(t, Baseline) testContexts.ValidateRootHashes(t, testContexts[Baseline].rootHash) diff --git a/miner/env_changes.go b/miner/env_changes.go index 
20cfb45131..09dc20ee91 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -181,6 +181,11 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat return bundleErr } + if bundle.MevGasPrice == nil { + c.rollback(gasUsedBefore, gasPoolBefore, profitBefore, txsBefore, receiptsBefore) + return ErrMevGasPriceNotSet + } + var ( bundleProfit = new(big.Int).Sub(c.env.state.GetBalance(c.env.coinbase), coinbaseBefore) gasUsed = c.usedGas - gasUsedBefore diff --git a/miner/environment_diff.go b/miner/environment_diff.go index b769264746..d41dfd7a30 100644 --- a/miner/environment_diff.go +++ b/miner/environment_diff.go @@ -196,7 +196,7 @@ func (envDiff *environmentDiff) commitBundle(bundle *types.SimulatedBundle, chDa tmpEnvDiff.newProfit.Add(profitBefore, coinbaseBalanceDelta) if bundle.MevGasPrice == nil { - return errors.New("bundle mev gas price is nil") + return ErrMevGasPriceNotSet } var ( From c1ae873bdbe2f21bfc563e5c0683e47feba9fff7 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Mon, 21 Aug 2023 17:58:27 -0500 Subject: [PATCH 38/46] Simplify test --- miner/algo_state_test.go | 43 ++++++++++------------------------------ 1 file changed, 11 insertions(+), 32 deletions(-) diff --git a/miner/algo_state_test.go b/miner/algo_state_test.go index 83dc5b9cc0..ad6a3ca99d 100644 --- a/miner/algo_state_test.go +++ b/miner/algo_state_test.go @@ -241,7 +241,7 @@ func (sc stateComparisonTestContexts) SimulateBundle(testCtxIdx int, b types.Mev env = tc.changes.env } - return simulateBundle(env, b, tc.chainData, nil) + return simulateBundle(env.copy(), b, tc.chainData, nil) } func (sc stateComparisonTestContexts) ValidateRootHashes(t *testing.T, expected common.Hash) { @@ -855,54 +855,33 @@ func TestBundles(t *testing.T) { } } - bundleMap := map[int][]types.SimulatedBundle{ - Baseline: make([]types.SimulatedBundle, 0), - SingleSnapshot: make([]types.SimulatedBundle, 0), - MultiSnapshot: make([]types.SimulatedBundle, 0), - } var ( - simulationErrMap = map[int]error{ - Baseline: nil, - SingleSnapshot: nil, - MultiSnapshot: nil, - } commitErrMap = map[int]error{ Baseline: nil, SingleSnapshot: nil, MultiSnapshot: nil, } ) + + base := testContexts[0] + genesisAlloc := genGenesisAlloc(base.signers, + []common.Address{payProxyAddress, logContractAddress}, [][]byte{payProxyCode, logContractCode}) + simulatedBundleList, err := simulateBundles(base.chainData.chainConfig, + types.CopyHeader(base.env.header), genesisAlloc, bundles[:]) + // commit bundles one by one to each test context to make sure each bundle result is deterministic // apply all to the underlying environment at the end - for bundleIdx, b := range bundles { - // first compare simulation results - for tcIdx, tc := range testContexts { - sim, simErr := testContexts.SimulateBundle(tcIdx, b) - t.Logf("bundle simulation error [bundle index %d] [%s]: %v", bundleIdx, tc.Name, simErr) - - simulationErrMap[tcIdx] = simErr - bundleMap[tcIdx] = append(bundleMap[tcIdx], sim) - - if simulationErrMap[Baseline] != nil { - require.NotNilf(t, simulationErrMap[tcIdx], "simulation error is nil for test context %s", tc.Name) - require.Equal(t, simulationErrMap[Baseline].Error(), simulationErrMap[tcIdx].Error(), - "simulation error mismatch for test context %s", tc.Name) - } else { - require.NoError(t, simulationErrMap[tcIdx], "simulation error for test context %s", tc.Name) - } - } - + for _, b := range simulatedBundleList { algoConf := defaultAlgorithmConfig algoConf.EnforceProfit = true for tcIdx, tc := range testContexts { var 
commitErr error - sim := bundleMap[tcIdx][bundleIdx] switch tcIdx { case Baseline: - commitErr = tc.envDiff.commitBundle(&sim, tc.chainData, nil, algoConf) + commitErr = tc.envDiff.commitBundle(&b, tc.chainData, nil, algoConf) case SingleSnapshot, MultiSnapshot: - commitErr = tc.changes.commitBundle(&sim, tc.chainData, algoConf) + commitErr = tc.changes.commitBundle(&b, tc.chainData, algoConf) if commitErrMap[Baseline] != nil { require.NoError(t, tc.changes.env.state.MultiTxSnapshotRevert()) From 1062c828615559acfecb53e2c713e97b75b5ac2d Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Mon, 21 Aug 2023 18:26:29 -0500 Subject: [PATCH 39/46] Fix linter error --- miner/algo_state_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/miner/algo_state_test.go b/miner/algo_state_test.go index ad6a3ca99d..29eb4d3d9e 100644 --- a/miner/algo_state_test.go +++ b/miner/algo_state_test.go @@ -868,6 +868,7 @@ func TestBundles(t *testing.T) { []common.Address{payProxyAddress, logContractAddress}, [][]byte{payProxyCode, logContractCode}) simulatedBundleList, err := simulateBundles(base.chainData.chainConfig, types.CopyHeader(base.env.header), genesisAlloc, bundles[:]) + require.NoError(t, err) // commit bundles one by one to each test context to make sure each bundle result is deterministic // apply all to the underlying environment at the end From c14c2997d283ffcf63369bb2556bea809390e67b Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 23 Aug 2023 16:28:01 -0500 Subject: [PATCH 40/46] Use different builders instead of configuration switch since major refactor required to handle more dynamic configurations. --- cmd/geth/main.go | 1 - cmd/utils/flags.go | 9 - miner/algo_common.go | 54 ------ miner/algo_common_test.go | 4 +- miner/algo_greedy.go | 26 +-- miner/algo_greedy_buckets.go | 23 +-- miner/algo_greedy_buckets_multisnap.go | 241 +++++++++++++++++++++++++ miner/algo_greedy_multisnap.go | 134 ++++++++++++++ miner/algo_greedy_test.go | 20 +- miner/algo_test.go | 81 ++++----- miner/env_changes_test.go | 2 - miner/miner.go | 50 ++--- miner/multi_worker.go | 2 +- miner/worker.go | 46 +++-- 14 files changed, 503 insertions(+), 190 deletions(-) create mode 100644 miner/algo_greedy_buckets_multisnap.go create mode 100644 miner/algo_greedy_multisnap.go diff --git a/cmd/geth/main.go b/cmd/geth/main.go index d32b194681..7795f0b7d9 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -158,7 +158,6 @@ var ( builderApiFlags = []cli.Flag{ utils.BuilderEnabled, - utils.BuilderEnableMultiTxSnapshot, utils.BuilderAlgoTypeFlag, utils.BuilderPriceCutoffPercentFlag, utils.BuilderEnableValidatorChecks, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 62fd9dc8d8..d70ed9b12b 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -697,14 +697,6 @@ var ( Usage: "Enable the builder", Category: flags.BuilderCategory, } - BuilderEnableMultiTxSnapshot = &cli.BoolFlag{ - Name: "builder.multi_tx_snapshot", - Usage: "Enable multi-transaction snapshots for block building, " + - "which decrease amount of state copying on bundle reverts (note: experimental)", - EnvVars: []string{"BUILDER_MULTI_TX_SNAPSHOT"}, - Value: ethconfig.Defaults.Miner.EnableMultiTransactionSnapshot, - Category: flags.BuilderCategory, - } // BuilderAlgoTypeFlag replaces MinerAlgoTypeFlag to move away from deprecated miner package // Note: builder.algotype was previously miner.algotype - this flag is still propagated to the miner configuration, @@ -1972,7 +1964,6 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { } } - 
cfg.EnableMultiTransactionSnapshot = ctx.Bool(BuilderEnableMultiTxSnapshot.Name) cfg.DiscardRevertibleTxOnErr = ctx.Bool(BuilderDiscardRevertibleTxOnErr.Name) cfg.PriceCutoffPercent = ctx.Int(BuilderPriceCutoffPercentFlag.Name) } diff --git a/miner/algo_common.go b/miner/algo_common.go index aea5602f94..e01ea6604a 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -38,7 +38,6 @@ var ( ExpectedProfit: nil, ProfitThresholdPercent: defaultProfitThresholdPercent, PriceCutoffPercent: defaultPriceCutoffPercent, - EnableMultiTxSnap: false, } ) @@ -84,9 +83,6 @@ type algorithmConfig struct { // is 10 (i.e. 10%), then the minimum effective gas price included in the same bucket as the top transaction // is (1000 * 10%) = 100 wei. PriceCutoffPercent int - // EnableMultiTxSnap is true if we want to use multi-transaction snapshot for committing transactions, - // which reduce state copies when reverting failed bundles (note: experimental) - EnableMultiTxSnap bool } type chainData struct { @@ -121,56 +117,6 @@ type ( CommitTxFunc func(*types.Transaction, chainData) (*types.Receipt, int, error) ) -func NewBuildBlockFunc( - inputEnvironment *environment, - builderKey *ecdsa.PrivateKey, - chData chainData, - algoConf algorithmConfig, - greedyBuckets *greedyBucketsBuilder, - greedy *greedyBuilder, -) BuildBlockFunc { - if algoConf.EnableMultiTxSnap { - return func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - orders := types.NewTransactionsByPriceAndNonce(inputEnvironment.signer, transactions, - simBundles, simSBundles, inputEnvironment.header.BaseFee) - - usedBundles, usedSbundles, err := BuildMultiTxSnapBlock( - inputEnvironment, - builderKey, - chData, - algoConf, - orders, - ) - if err != nil { - log.Trace("Error(s) building multi-tx snapshot block", "err", err) - } - return inputEnvironment, usedBundles, usedSbundles - } - } else if builder := greedyBuckets; builder != nil { - return func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - orders := types.NewTransactionsByPriceAndNonce(inputEnvironment.signer, transactions, - simBundles, simSBundles, inputEnvironment.header.BaseFee) - - envDiff := newEnvironmentDiff(inputEnvironment.copy()) - usedBundles, usedSbundles := builder.mergeOrdersIntoEnvDiff(envDiff, orders) - envDiff.applyToBaseEnv() - return envDiff.baseEnvironment, usedBundles, usedSbundles - } - } else if builder := greedy; builder != nil { - return func(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - orders := types.NewTransactionsByPriceAndNonce(inputEnvironment.signer, transactions, - simBundles, simSBundles, inputEnvironment.header.BaseFee) - - envDiff := newEnvironmentDiff(inputEnvironment.copy()) - usedBundles, usedSbundles := builder.mergeOrdersIntoEnvDiff(envDiff, orders) - envDiff.applyToBaseEnv() - return envDiff.baseEnvironment, usedBundles, usedSbundles - } - } else { - panic("invalid call to build block function") - } -} - func ValidateGasPriceAndProfit(algoConf algorithmConfig, actualPrice, expectedPrice *big.Int, tolerablePriceDifferencePercent int, actualProfit, expectedProfit *big.Int) error { // allow tolerablePriceDifferencePercent % divergence diff 
--git a/miner/algo_common_test.go b/miner/algo_common_test.go index 6eb2984179..e3372626f0 100644 --- a/miner/algo_common_test.go +++ b/miner/algo_common_test.go @@ -525,7 +525,7 @@ func TestGetSealingWorkAlgos(t *testing.T) { testConfig.AlgoType = ALGO_MEV_GETH }) - for _, algoType := range []AlgoType{ALGO_MEV_GETH, ALGO_GREEDY, ALGO_GREEDY_BUCKETS} { + for _, algoType := range []AlgoType{ALGO_MEV_GETH, ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP} { local := new(params.ChainConfig) *local = *ethashChainConfig local.TerminalTotalDifficulty = big.NewInt(0) @@ -540,7 +540,7 @@ func TestGetSealingWorkAlgosWithProfit(t *testing.T) { testConfig.BuilderTxSigningKey = nil }) - for _, algoType := range []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS} { + for _, algoType := range []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP} { var err error testConfig.BuilderTxSigningKey, err = crypto.GenerateKey() require.NoError(t, err) diff --git a/miner/algo_greedy.go b/miner/algo_greedy.go index 4a712f03c3..f40f5ff872 100644 --- a/miner/algo_greedy.go +++ b/miner/algo_greedy.go @@ -20,36 +20,24 @@ type greedyBuilder struct { chainData chainData builderKey *ecdsa.PrivateKey interrupt *int32 - buildBlockFunc BuildBlockFunc algoConf algorithmConfig } func newGreedyBuilder( chain *core.BlockChain, chainConfig *params.ChainConfig, algoConf *algorithmConfig, blacklist map[common.Address]struct{}, env *environment, key *ecdsa.PrivateKey, interrupt *int32, -) (*greedyBuilder, error) { +) *greedyBuilder { if algoConf == nil { - return nil, errNoAlgorithmConfig + panic("algoConf cannot be nil") } - builder := &greedyBuilder{ + return &greedyBuilder{ inputEnvironment: env, - chainData: chainData{chainConfig: chainConfig, chain: chain, blacklist: blacklist}, + chainData: chainData{chainConfig, chain, blacklist}, builderKey: key, interrupt: interrupt, algoConf: *algoConf, } - // Initialize block builder function - builder.buildBlockFunc = NewBuildBlockFunc( - builder.inputEnvironment, - builder.builderKey, - builder.chainData, - builder.algoConf, - nil, - builder, - ) - - return builder, nil } func (b *greedyBuilder) mergeOrdersIntoEnvDiff( @@ -115,5 +103,9 @@ func (b *greedyBuilder) mergeOrdersIntoEnvDiff( } func (b *greedyBuilder) buildBlock(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - return b.buildBlockFunc(simBundles, simSBundles, transactions) + orders := types.NewTransactionsByPriceAndNonce(b.inputEnvironment.signer, transactions, simBundles, simSBundles, b.inputEnvironment.header.BaseFee) + envDiff := newEnvironmentDiff(b.inputEnvironment.copy()) + usedBundles, usedSbundles := b.mergeOrdersIntoEnvDiff(envDiff, orders) + envDiff.applyToBaseEnv() + return envDiff.baseEnvironment, usedBundles, usedSbundles } diff --git a/miner/algo_greedy_buckets.go b/miner/algo_greedy_buckets.go index 63fad2989a..b3e410eb2a 100644 --- a/miner/algo_greedy_buckets.go +++ b/miner/algo_greedy_buckets.go @@ -25,18 +25,17 @@ type greedyBucketsBuilder struct { interrupt *int32 gasUsedMap map[*types.TxWithMinerFee]uint64 algoConf algorithmConfig - buildBlockFunc BuildBlockFunc } func newGreedyBucketsBuilder( chain *core.BlockChain, chainConfig *params.ChainConfig, algoConf *algorithmConfig, blacklist map[common.Address]struct{}, env *environment, key *ecdsa.PrivateKey, interrupt *int32, -) (*greedyBucketsBuilder, 
error) { +) *greedyBucketsBuilder { if algoConf == nil { - return nil, errNoAlgorithmConfig + panic("algoConf cannot be nil") } - builder := &greedyBucketsBuilder{ + return &greedyBucketsBuilder{ inputEnvironment: env, chainData: chainData{chainConfig: chainConfig, chain: chain, blacklist: blacklist}, builderKey: key, @@ -44,10 +43,6 @@ func newGreedyBucketsBuilder( gasUsedMap: make(map[*types.TxWithMinerFee]uint64), algoConf: *algoConf, } - - // Initialize block builder function - builder.buildBlockFunc = NewBuildBlockFunc(builder.inputEnvironment, builder.builderKey, builder.chainData, builder.algoConf, builder, nil) - return builder, nil } // CutoffPriceFromOrder returns the cutoff price for a given order based on the cutoff percent. @@ -145,9 +140,6 @@ func (b *greedyBucketsBuilder) commit(envDiff *environmentDiff, usedEntry.Success = false usedSbundles = append(usedSbundles, usedEntry) } - } else { - usedEntry.Success = false - usedSbundles = append(usedSbundles, usedEntry) } continue } @@ -223,7 +215,10 @@ func (b *greedyBucketsBuilder) mergeOrdersIntoEnvDiff( return usedBundles, usedSbundles } -func (b *greedyBucketsBuilder) buildBlock(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, - transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { - return b.buildBlockFunc(simBundles, simSBundles, transactions) +func (b *greedyBucketsBuilder) buildBlock(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + orders := types.NewTransactionsByPriceAndNonce(b.inputEnvironment.signer, transactions, simBundles, simSBundles, b.inputEnvironment.header.BaseFee) + envDiff := newEnvironmentDiff(b.inputEnvironment.copy()) + usedBundles, usedSbundles := b.mergeOrdersIntoEnvDiff(envDiff, orders) + envDiff.applyToBaseEnv() + return envDiff.baseEnvironment, usedBundles, usedSbundles } diff --git a/miner/algo_greedy_buckets_multisnap.go b/miner/algo_greedy_buckets_multisnap.go new file mode 100644 index 0000000000..ac95c4c8d9 --- /dev/null +++ b/miner/algo_greedy_buckets_multisnap.go @@ -0,0 +1,241 @@ +package miner + +import ( + "crypto/ecdsa" + "errors" + "math/big" + "sort" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" +) + +// / To use it: +// / 1. Copy relevant data from the worker +// / 2. Call buildBlock +// / 2. 
If new bundles, txs arrive, call buildBlock again +// / This struct lifecycle is tied to 1 block-building task +type greedyBucketsMultiSnapBuilder struct { + inputEnvironment *environment + chainData chainData + builderKey *ecdsa.PrivateKey + interrupt *int32 + gasUsedMap map[*types.TxWithMinerFee]uint64 + algoConf algorithmConfig +} + +func newGreedyBucketsMultiSnapBuilder( + chain *core.BlockChain, chainConfig *params.ChainConfig, algoConf *algorithmConfig, + blacklist map[common.Address]struct{}, env *environment, key *ecdsa.PrivateKey, interrupt *int32, +) *greedyBucketsMultiSnapBuilder { + if algoConf == nil { + panic("algoConf cannot be nil") + } + + return &greedyBucketsMultiSnapBuilder{ + inputEnvironment: env, + chainData: chainData{chainConfig: chainConfig, chain: chain, blacklist: blacklist}, + builderKey: key, + interrupt: interrupt, + gasUsedMap: make(map[*types.TxWithMinerFee]uint64), + algoConf: *algoConf, + } +} + +func (b *greedyBucketsMultiSnapBuilder) commit(changes *envChanges, + transactions []*types.TxWithMinerFee, + orders *types.TransactionsByPriceAndNonce, + gasUsedMap map[*types.TxWithMinerFee]uint64, retryMap map[*types.TxWithMinerFee]int, retryLimit int, +) ([]types.SimulatedBundle, []types.UsedSBundle) { + var ( + algoConf = b.algoConf + + usedBundles []types.SimulatedBundle + usedSbundles []types.UsedSBundle + ) + + for _, order := range transactions { + if err := changes.env.state.NewMultiTxSnapshot(); err != nil { + log.Error("Failed to create new multi-tx snapshot", "err", err) + return usedBundles, usedSbundles + } + + orderFailed := false + + if tx := order.Tx(); tx != nil { + receipt, skip, err := changes.commitTx(tx, b.chainData) + orderFailed = err != nil + if err != nil { + log.Trace("could not apply tx", "hash", tx.Hash(), "err", err) + + // attempt to retry transaction commit up to retryLimit + // the gas used is set for the order to re-calculate profit of the transaction for subsequent retries + if receipt != nil { + // if the receipt is nil we don't attempt to retry the transaction - this is to mitigate abuse since + // without a receipt the default profit calculation for a transaction uses the gas limit which + // can cause the transaction to always be first in any profit-sorted transaction list + gasUsedMap[order] = receipt.GasUsed + CheckRetryOrderAndReinsert(order, orders, retryMap, retryLimit) + } + } else { + if skip == shiftTx { + orders.ShiftAndPushByAccountForTx(tx) + } + // we don't check for error here because if EGP returns error, it would have been caught and returned by commitTx + effGapPrice, _ := tx.EffectiveGasTip(changes.env.header.BaseFee) + log.Trace("Included tx", "EGP", effGapPrice.String(), "gasUsed", receipt.GasUsed) + } + } else if bundle := order.Bundle(); bundle != nil { + err := changes.commitBundle(bundle, b.chainData, algoConf) + orderFailed = err != nil + if err != nil { + log.Trace("Could not apply bundle", "bundle", bundle.OriginalBundle.Hash, "err", err) + + var e *lowProfitError + if errors.As(err, &e) { + if e.ActualEffectiveGasPrice != nil { + order.SetPrice(e.ActualEffectiveGasPrice) + } + + if e.ActualProfit != nil { + order.SetProfit(e.ActualProfit) + } + // if the bundle was not included due to low profit, we can retry the bundle + CheckRetryOrderAndReinsert(order, orders, retryMap, retryLimit) + } + } else { + log.Trace("Included bundle", "bundleEGP", bundle.MevGasPrice.String(), + "gasUsed", bundle.TotalGasUsed, "ethToCoinbase", ethIntToFloat(bundle.EthSentToCoinbase)) + usedBundles = append(usedBundles, 
*bundle) + } + } else if sbundle := order.SBundle(); sbundle != nil { + err := changes.CommitSBundle(sbundle, b.chainData, b.builderKey, algoConf) + orderFailed = err != nil + usedEntry := types.UsedSBundle{ + Bundle: sbundle.Bundle, + Success: err == nil, + } + + isValidOrNotRetried := true + if err != nil { + log.Trace("Could not apply sbundle", "bundle", sbundle.Bundle.Hash(), "err", err) + + var e *lowProfitError + if errors.As(err, &e) { + if e.ActualEffectiveGasPrice != nil { + order.SetPrice(e.ActualEffectiveGasPrice) + } + + if e.ActualProfit != nil { + order.SetProfit(e.ActualProfit) + } + + // if the sbundle was not included due to low profit, we can retry the bundle + if ok := CheckRetryOrderAndReinsert(order, orders, retryMap, retryLimit); ok { + isValidOrNotRetried = false + } + } + } else { + log.Trace("Included sbundle", "bundleEGP", sbundle.MevGasPrice.String(), "ethToCoinbase", ethIntToFloat(sbundle.Profit)) + } + + if isValidOrNotRetried { + usedSbundles = append(usedSbundles, usedEntry) + } + } else { + // note: this should never happen because we should not be inserting invalid transaction types into + // the orders heap + panic("unsupported order type found") + } + + if orderFailed { + if err := changes.env.state.MultiTxSnapshotRevert(); err != nil { + log.Error("Failed to revert snapshot", "err", err) + return usedBundles, usedSbundles + } + } else { + if err := changes.env.state.MultiTxSnapshotCommit(); err != nil { + log.Error("Failed to commit snapshot", "err", err) + return usedBundles, usedSbundles + } + } + } + return usedBundles, usedSbundles +} + +func (b *greedyBucketsMultiSnapBuilder) mergeOrdersAndApplyToEnv( + orders *types.TransactionsByPriceAndNonce) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + if orders.Peek() == nil { + return b.inputEnvironment, nil, nil + } + + changes, err := newEnvChanges(b.inputEnvironment) + if err != nil { + log.Error("Failed to create new environment changes", "err", err) + return b.inputEnvironment, nil, nil + } + + const retryLimit = 1 + + var ( + baseFee = changes.env.header.BaseFee + retryMap = make(map[*types.TxWithMinerFee]int) + usedBundles []types.SimulatedBundle + usedSbundles []types.UsedSBundle + transactions []*types.TxWithMinerFee + priceCutoffPercent = b.algoConf.PriceCutoffPercent + + SortInPlaceByProfit = func(baseFee *big.Int, transactions []*types.TxWithMinerFee, gasUsedMap map[*types.TxWithMinerFee]uint64) { + sort.SliceStable(transactions, func(i, j int) bool { + return transactions[i].Profit(baseFee, gasUsedMap[transactions[i]]).Cmp(transactions[j].Profit(baseFee, gasUsedMap[transactions[j]])) > 0 + }) + } + ) + + minPrice := CutoffPriceFromOrder(orders.Peek(), priceCutoffPercent) + for { + order := orders.Peek() + if order == nil { + if len(transactions) != 0 { + SortInPlaceByProfit(baseFee, transactions, b.gasUsedMap) + bundles, sbundles := b.commit(changes, transactions, orders, b.gasUsedMap, retryMap, retryLimit) + usedBundles = append(usedBundles, bundles...) + usedSbundles = append(usedSbundles, sbundles...) 
+ transactions = nil + // re-run since committing transactions may have pushed higher nonce transactions, or previously + // failed transactions back into orders heap + continue + } + break + } + + if ok := IsOrderInPriceRange(order, minPrice); ok { + orders.Pop() + transactions = append(transactions, order) + } else { + if len(transactions) != 0 { + SortInPlaceByProfit(baseFee, transactions, b.gasUsedMap) + bundles, sbundles := b.commit(changes, transactions, orders, b.gasUsedMap, retryMap, retryLimit) + usedBundles = append(usedBundles, bundles...) + usedSbundles = append(usedSbundles, sbundles...) + transactions = nil + } + minPrice = CutoffPriceFromOrder(order, priceCutoffPercent) + } + } + + if err := changes.apply(); err != nil { + log.Error("Failed to apply changes", "err", err) + return b.inputEnvironment, nil, nil + } + + return changes.env, usedBundles, usedSbundles +} + +func (b *greedyBucketsMultiSnapBuilder) buildBlock(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + orders := types.NewTransactionsByPriceAndNonce(b.inputEnvironment.signer, transactions, simBundles, simSBundles, b.inputEnvironment.header.BaseFee) + return b.mergeOrdersAndApplyToEnv(orders) +} diff --git a/miner/algo_greedy_multisnap.go b/miner/algo_greedy_multisnap.go new file mode 100644 index 0000000000..ca3ee3d3ed --- /dev/null +++ b/miner/algo_greedy_multisnap.go @@ -0,0 +1,134 @@ +package miner + +import ( + "crypto/ecdsa" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" +) + +// / To use it: +// / 1. Copy relevant data from the worker +// / 2. Call buildBlock +// / 2. 
If new bundles, txs arrive, call buildBlock again +// / This struct lifecycle is tied to 1 block-building task +type greedyMultiSnapBuilder struct { + inputEnvironment *environment + chainData chainData + builderKey *ecdsa.PrivateKey + interrupt *int32 + algoConf algorithmConfig +} + +func newGreedyMultiSnapBuilder( + chain *core.BlockChain, chainConfig *params.ChainConfig, algoConf *algorithmConfig, + blacklist map[common.Address]struct{}, env *environment, key *ecdsa.PrivateKey, interrupt *int32, +) *greedyMultiSnapBuilder { + if algoConf == nil { + algoConf = &defaultAlgorithmConfig + } + return &greedyMultiSnapBuilder{ + inputEnvironment: env, + chainData: chainData{chainConfig, chain, blacklist}, + builderKey: key, + interrupt: interrupt, + algoConf: *algoConf, + } +} + +func (b *greedyMultiSnapBuilder) buildBlock(simBundles []types.SimulatedBundle, simSBundles []*types.SimSBundle, transactions map[common.Address]types.Transactions) (*environment, []types.SimulatedBundle, []types.UsedSBundle) { + orders := types.NewTransactionsByPriceAndNonce(b.inputEnvironment.signer, transactions, simBundles, simSBundles, b.inputEnvironment.header.BaseFee) + + var ( + usedBundles []types.SimulatedBundle + usedSbundles []types.UsedSBundle + ) + + changes, err := newEnvChanges(b.inputEnvironment) + if err != nil { + log.Error("Failed to create new environment changes", "err", err) + return b.inputEnvironment, usedBundles, usedSbundles + } + + for { + order := orders.Peek() + if order == nil { + break + } + + orderFailed := false + if err := changes.env.state.NewMultiTxSnapshot(); err != nil { + log.Error("Failed to create snapshot", "err", err) + return b.inputEnvironment, usedBundles, usedSbundles + } + + if tx := order.Tx(); tx != nil { + receipt, skip, err := changes.commitTx(tx, b.chainData) + switch skip { + case shiftTx: + orders.Shift() + case popTx: + orders.Pop() + } + orderFailed = err != nil + + if err != nil { + log.Trace("could not apply tx", "hash", tx.Hash(), "err", err) + } else { + // we don't check for error here because if EGP returns error, it would have been caught and returned by commitTx + effGapPrice, _ := tx.EffectiveGasTip(changes.env.header.BaseFee) + log.Trace("Included tx", "EGP", effGapPrice.String(), "gasUsed", receipt.GasUsed) + } + } else if bundle := order.Bundle(); bundle != nil { + err := changes.commitBundle(bundle, b.chainData, b.algoConf) + orders.Pop() + orderFailed = err != nil + + if err != nil { + log.Trace("Could not apply bundle", "bundle", bundle.OriginalBundle.Hash, "err", err) + } else { + log.Trace("Included bundle", "bundleEGP", bundle.MevGasPrice.String(), + "gasUsed", bundle.TotalGasUsed, "ethToCoinbase", ethIntToFloat(bundle.EthSentToCoinbase)) + usedBundles = append(usedBundles, *bundle) + } + } else if sbundle := order.SBundle(); sbundle != nil { + err := changes.CommitSBundle(sbundle, b.chainData, b.builderKey, b.algoConf) + orders.Pop() + orderFailed = err != nil + usedEntry := types.UsedSBundle{ + Bundle: sbundle.Bundle, + Success: err == nil, + } + + if err != nil { + log.Trace("Could not apply sbundle", "bundle", sbundle.Bundle.Hash(), "err", err) + } else { + log.Trace("Included sbundle", "bundleEGP", sbundle.MevGasPrice.String(), "ethToCoinbase", ethIntToFloat(sbundle.Profit)) + } + + usedSbundles = append(usedSbundles, usedEntry) + } + + if orderFailed { + if err := changes.env.state.MultiTxSnapshotRevert(); err != nil { + log.Error("Failed to revert snapshot", "err", err) + return b.inputEnvironment, usedBundles, usedSbundles + } + } else 
{ + if err := changes.env.state.MultiTxSnapshotCommit(); err != nil { + log.Error("Failed to commit snapshot", "err", err) + return b.inputEnvironment, usedBundles, usedSbundles + } + } + } + + if err := changes.apply(); err != nil { + log.Error("Failed to apply changes", "err", err) + return b.inputEnvironment, usedBundles, usedSbundles + } + + return changes.env, usedBundles, usedSbundles +} diff --git a/miner/algo_greedy_test.go b/miner/algo_greedy_test.go index c7d6aae682..ba680ec059 100644 --- a/miner/algo_greedy_test.go +++ b/miner/algo_greedy_test.go @@ -11,7 +11,7 @@ import ( ) func TestBuildBlockGasLimit(t *testing.T) { - algos := []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS} + algos := []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP} for _, algo := range algos { statedb, chData, signers := genTestSetup(GasLimit) env := newEnvironment(chData, statedb, signers.addresses[0], 21000, big.NewInt(1)) @@ -29,17 +29,17 @@ func TestBuildBlockGasLimit(t *testing.T) { var result *environment switch algo { + case ALGO_GREEDY: + builder := newGreedyBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) + result, _, _ = builder.buildBlock([]types.SimulatedBundle{}, nil, txs) + case ALGO_GREEDY_MULTISNAP: + builder := newGreedyMultiSnapBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) + result, _, _ = builder.buildBlock([]types.SimulatedBundle{}, nil, txs) case ALGO_GREEDY_BUCKETS: - builder, err := newGreedyBucketsBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) - if err != nil { - t.Fatalf("Error creating greedy buckets builder: %v", err) - } + builder := newGreedyBucketsBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) result, _, _ = builder.buildBlock([]types.SimulatedBundle{}, nil, txs) - case ALGO_GREEDY: - builder, err := newGreedyBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) - if err != nil { - t.Fatalf("Error creating greedy builder: %v", err) - } + case ALGO_GREEDY_BUCKETS_MULTISNAP: + builder := newGreedyBucketsMultiSnapBuilder(chData.chain, chData.chainConfig, &defaultAlgorithmConfig, nil, env, nil, nil) result, _, _ = builder.buildBlock([]types.SimulatedBundle{}, nil, txs) } diff --git a/miner/algo_test.go b/miner/algo_test.go index 5e3251467b..ab63031e48 100644 --- a/miner/algo_test.go +++ b/miner/algo_test.go @@ -38,7 +38,7 @@ var algoTests = []*algoTest{ } }, WantProfit: big.NewInt(2 * 21_000), - SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: defaultAlgorithmConfig, }, { @@ -65,7 +65,7 @@ var algoTests = []*algoTest{ } }, WantProfit: big.NewInt(4 * 21_000), - SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: defaultAlgorithmConfig, }, { @@ -84,7 +84,7 @@ var algoTests = []*algoTest{ } }, WantProfit: big.NewInt(0), - SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: defaultAlgorithmConfig, }, { @@ -106,7 +106,7 @@ var algoTests = []*algoTest{ } }, WantProfit: big.NewInt(50_000), - 
SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: defaultAlgorithmConfig, }, { @@ -128,7 +128,7 @@ var algoTests = []*algoTest{ } }, WantProfit: common.Big0, - SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: algorithmConfig{ DropRevertibleTxOnErr: true, EnforceProfit: defaultAlgorithmConfig.EnforceProfit, @@ -160,7 +160,7 @@ var algoTests = []*algoTest{ } }, WantProfit: big.NewInt(21_000), - SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: algorithmConfig{ DropRevertibleTxOnErr: true, EnforceProfit: defaultAlgorithmConfig.EnforceProfit, @@ -191,7 +191,7 @@ var algoTests = []*algoTest{ } }, WantProfit: big.NewInt(50_000), - SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS}, + SupportedAlgorithms: []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP}, AlgorithmConfig: defaultAlgorithmConfig, }, } @@ -204,38 +204,25 @@ func TestAlgo(t *testing.T) { for _, test := range algoTests { for _, algo := range test.SupportedAlgorithms { - // test with multi-tx-snapshot enabled and disabled (default) - multiSnapDisabled := defaultAlgorithmConfig - multiSnapDisabled.EnableMultiTxSnap = false + testName := fmt.Sprintf("%s-%s", test.Name, algo.String()) - multiSnapEnabled := defaultAlgorithmConfig - multiSnapEnabled.EnableMultiTxSnap = true - - algoConfigs := []algorithmConfig{ - multiSnapEnabled, - multiSnapDisabled, - } - for _, algoConf := range algoConfigs { - testName := fmt.Sprintf("%s-%s-%t", test.Name, algo.String(), algoConf.EnableMultiTxSnap) - - t.Run(testName, func(t *testing.T) { - alloc, txPool, bundles, err := test.build(signer, 1) - if err != nil { - t.Fatalf("Build: %v", err) - } - simBundles, err := simulateBundles(config, test.Header, alloc, bundles) - if err != nil { - t.Fatalf("Simulate Bundles: %v", err) - } - gotProfit, err := runAlgoTest(algo, algoConf, config, alloc, txPool, simBundles, test.Header, 1) - if err != nil { - t.Fatal(err) - } - if test.WantProfit.Cmp(gotProfit) != 0 { - t.Fatalf("Profit: want %v, got %v", test.WantProfit, gotProfit) - } - }) - } + t.Run(testName, func(t *testing.T) { + alloc, txPool, bundles, err := test.build(signer, 1) + if err != nil { + t.Fatalf("Build: %v", err) + } + simBundles, err := simulateBundles(config, test.Header, alloc, bundles) + if err != nil { + t.Fatalf("Simulate Bundles: %v", err) + } + gotProfit, err := runAlgoTest(algo, test.AlgorithmConfig, config, alloc, txPool, simBundles, test.Header, 1) + if err != nil { + t.Fatal(err) + } + if test.WantProfit.Cmp(gotProfit) != 0 { + t.Fatalf("Profit: want %v, got %v", test.WantProfit, gotProfit) + } + }) } } } @@ -307,17 +294,17 @@ func runAlgoTest( // build block switch algo { + case ALGO_GREEDY: + builder := newGreedyBuilder(chData.chain, chData.chainConfig, &algoConf, nil, env, nil, nil) + resultEnv, _, _ = builder.buildBlock(bundles, nil, txPool) + case ALGO_GREEDY_MULTISNAP: + builder := newGreedyMultiSnapBuilder(chData.chain, chData.chainConfig, &algoConf, nil, env, nil, nil) + resultEnv, _, _ = builder.buildBlock(bundles, nil, txPool) case 
ALGO_GREEDY_BUCKETS: - builder, err := newGreedyBucketsBuilder(chData.chain, chData.chainConfig, &algoConf, nil, env, nil, nil) - if err != nil { - return nil, err - } + builder := newGreedyBucketsBuilder(chData.chain, chData.chainConfig, &algoConf, nil, env, nil, nil) resultEnv, _, _ = builder.buildBlock(bundles, nil, txPool) - case ALGO_GREEDY: - builder, err := newGreedyBuilder(chData.chain, chData.chainConfig, &algoConf, nil, env, nil, nil) - if err != nil { - return nil, err - } + case ALGO_GREEDY_BUCKETS_MULTISNAP: + builder := newGreedyBucketsMultiSnapBuilder(chData.chain, chData.chainConfig, &algoConf, nil, env, nil, nil) resultEnv, _, _ = builder.buildBlock(bundles, nil, txPool) } return resultEnv.profit, nil diff --git a/miner/env_changes_test.go b/miner/env_changes_test.go index e5b4fc2740..decb179dca 100644 --- a/miner/env_changes_test.go +++ b/miner/env_changes_test.go @@ -61,7 +61,6 @@ func TestBundleCommitSnaps(t *testing.T) { statedb, chData, signers := genTestSetup(GasLimit) algoConf := defaultAlgorithmConfig - algoConf.EnableMultiTxSnap = true env := newEnvironment(chData, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) @@ -170,7 +169,6 @@ func TestErrorBundleCommitSnaps(t *testing.T) { statedb, chData, signers := genTestSetup(GasLimit) algoConf := defaultAlgorithmConfig - algoConf.EnableMultiTxSnap = true env := newEnvironment(chData, statedb, signers.addresses[0], 21000*2, big.NewInt(1)) // This tx will be included before bundle so bundle will fail because of gas limit diff --git a/miner/miner.go b/miner/miner.go index a6c8a1618b..203132fc7d 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -54,16 +54,22 @@ const ( ALGO_MEV_GETH AlgoType = iota ALGO_GREEDY ALGO_GREEDY_BUCKETS + ALGO_GREEDY_MULTISNAP + ALGO_GREEDY_BUCKETS_MULTISNAP ) func (a AlgoType) String() string { switch a { case ALGO_GREEDY: return "greedy" + case ALGO_GREEDY_MULTISNAP: + return "greedy-multi-snap" case ALGO_MEV_GETH: return "mev-geth" case ALGO_GREEDY_BUCKETS: return "greedy-buckets" + case ALGO_GREEDY_BUCKETS_MULTISNAP: + return "greedy-buckets-multi-snap" default: return "unsupported" } @@ -77,6 +83,10 @@ func AlgoTypeFlagToEnum(algoString string) (AlgoType, error) { return ALGO_GREEDY_BUCKETS, nil case ALGO_GREEDY.String(): return ALGO_GREEDY, nil + case ALGO_GREEDY_MULTISNAP.String(): + return ALGO_GREEDY_MULTISNAP, nil + case ALGO_GREEDY_BUCKETS_MULTISNAP.String(): + return ALGO_GREEDY_BUCKETS_MULTISNAP, nil default: return ALGO_MEV_GETH, errors.New("algo not recognized") } @@ -84,23 +94,22 @@ func AlgoTypeFlagToEnum(algoString string) (AlgoType, error) { // Config is the configuration parameters of mining. type Config struct { - Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account) - Notify []string `toml:",omitempty"` // HTTP URL list to be notified of new work packages (only useful in ethash). - NotifyFull bool `toml:",omitempty"` // Notify with pending block headers instead of work packages - ExtraData hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner - GasFloor uint64 // Target gas floor for mined blocks. - GasCeil uint64 // Target gas ceiling for mined blocks. - GasPrice *big.Int // Minimum gas price for mining a transaction - AlgoType AlgoType // Algorithm to use for block building - Recommit time.Duration // The time interval for miner to re-create mining work. 
- Noverify bool // Disable remote mining solution verification(only useful in ethash). - BuilderTxSigningKey *ecdsa.PrivateKey `toml:",omitempty"` // Signing key of builder coinbase to make transaction to validator - MaxMergedBundles int - Blocklist []common.Address `toml:",omitempty"` - NewPayloadTimeout time.Duration // The maximum time allowance for creating a new payload - PriceCutoffPercent int // Effective gas price cutoff % used for bucketing transactions by price (only useful in greedy-buckets AlgoType) - DiscardRevertibleTxOnErr bool // When enabled, if bundle revertible transaction has error on commit, builder will discard the transaction - EnableMultiTransactionSnapshot bool // Enable block building with multi-transaction snapshots to reduce state copying (note: experimental) + Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account) + Notify []string `toml:",omitempty"` // HTTP URL list to be notified of new work packages (only useful in ethash). + NotifyFull bool `toml:",omitempty"` // Notify with pending block headers instead of work packages + ExtraData hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner + GasFloor uint64 // Target gas floor for mined blocks. + GasCeil uint64 // Target gas ceiling for mined blocks. + GasPrice *big.Int // Minimum gas price for mining a transaction + AlgoType AlgoType // Algorithm to use for block building + Recommit time.Duration // The time interval for miner to re-create mining work. + Noverify bool // Disable remote mining solution verification(only useful in ethash). + BuilderTxSigningKey *ecdsa.PrivateKey `toml:",omitempty"` // Signing key of builder coinbase to make transaction to validator + MaxMergedBundles int + Blocklist []common.Address `toml:",omitempty"` + NewPayloadTimeout time.Duration // The maximum time allowance for creating a new payload + PriceCutoffPercent int // Effective gas price cutoff % used for bucketing transactions by price (only useful in greedy-buckets AlgoType) + DiscardRevertibleTxOnErr bool // When enabled, if bundle revertible transaction has error on commit, builder will discard the transaction } // DefaultConfig contains default settings for miner. @@ -112,10 +121,9 @@ var DefaultConfig = Config{ // consensus-layer usually will wait a half slot of time(6s) // for payload generation. It should be enough for Geth to // run 3 rounds. - Recommit: 2 * time.Second, - NewPayloadTimeout: 2 * time.Second, - PriceCutoffPercent: defaultPriceCutoffPercent, - EnableMultiTransactionSnapshot: defaultAlgorithmConfig.EnableMultiTxSnap, + Recommit: 2 * time.Second, + NewPayloadTimeout: 2 * time.Second, + PriceCutoffPercent: defaultPriceCutoffPercent, } // Miner creates blocks and searches for proof-of-work values. 
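The miner.go hunk above adds ALGO_GREEDY_MULTISNAP and ALGO_GREEDY_BUCKETS_MULTISNAP to the AlgoType enum, teaches String() their flag names, and lets AlgoTypeFlagToEnum resolve those names back to the enum. As a rough illustration only (the main function, the log output, and the use of DefaultConfig below are assumptions for the sketch, not part of this patch), a caller of the patched miner package could select one of the new algorithms like this:

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/miner"
)

func main() {
	// The two flag strings introduced by this patch; any unrecognized string
	// falls back to ALGO_MEV_GETH together with an error, per AlgoTypeFlagToEnum.
	for _, name := range []string{"greedy-multi-snap", "greedy-buckets-multi-snap"} {
		algo, err := miner.AlgoTypeFlagToEnum(name)
		if err != nil {
			log.Fatalf("unknown algorithm %q: %v", name, err)
		}

		cfg := miner.DefaultConfig // value copy of the default miner configuration
		cfg.AlgoType = algo
		log.Printf("selected block-building algorithm: %s", cfg.AlgoType.String())
	}
}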
diff --git a/miner/multi_worker.go b/miner/multi_worker.go index 93cb8aadae..ab33a84ee8 100644 --- a/miner/multi_worker.go +++ b/miner/multi_worker.go @@ -141,7 +141,7 @@ func newMultiWorker(config *Config, chainConfig *params.ChainConfig, engine cons switch config.AlgoType { case ALGO_MEV_GETH: return newMultiWorkerMevGeth(config, chainConfig, engine, eth, mux, isLocalBlock, init) - case ALGO_GREEDY, ALGO_GREEDY_BUCKETS: + case ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP: return newMultiWorkerGreedy(config, chainConfig, engine, eth, mux, isLocalBlock, init) default: panic("unsupported builder algorithm found") diff --git a/miner/worker.go b/miner/worker.go index 2453ec8395..ef31510ba2 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1312,7 +1312,7 @@ func (w *worker) fillTransactionsSelectAlgo(interrupt *int32, env *environment) err error ) switch w.flashbots.algoType { - case ALGO_GREEDY, ALGO_GREEDY_BUCKETS: + case ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP: blockBundles, allBundles, usedSbundles, mempoolTxHashes, err = w.fillTransactionsAlgoWorker(interrupt, env) case ALGO_MEV_GETH: blockBundles, allBundles, mempoolTxHashes, err = w.fillTransactions(interrupt, env) @@ -1426,37 +1426,59 @@ func (w *worker) fillTransactionsAlgoWorker(interrupt *int32, env *environment) EnforceProfit: true, ProfitThresholdPercent: defaultProfitThresholdPercent, PriceCutoffPercent: priceCutoffPercent, - EnableMultiTxSnap: w.config.EnableMultiTransactionSnapshot, } - builder, err := newGreedyBucketsBuilder( + builder := newGreedyBucketsBuilder( w.chain, w.chainConfig, algoConf, w.blockList, env, w.config.BuilderTxSigningKey, interrupt, ) - if err != nil { - return nil, nil, nil, nil, err + + newEnv, blockBundles, usedSbundle = builder.buildBlock(bundlesToConsider, sbundlesToConsider, pending) + case ALGO_GREEDY_BUCKETS_MULTISNAP: + priceCutoffPercent := w.config.PriceCutoffPercent + if !(priceCutoffPercent >= 0 && priceCutoffPercent <= 100) { + return nil, nil, nil, nil, errors.New("invalid price cutoff percent - must be between 0 and 100") + } + + algoConf := &algorithmConfig{ + DropRevertibleTxOnErr: w.config.DiscardRevertibleTxOnErr, + EnforceProfit: true, + ProfitThresholdPercent: defaultProfitThresholdPercent, + PriceCutoffPercent: priceCutoffPercent, + } + builder := newGreedyBucketsMultiSnapBuilder( + w.chain, w.chainConfig, algoConf, w.blockList, env, + w.config.BuilderTxSigningKey, interrupt, + ) + newEnv, blockBundles, usedSbundle = builder.buildBlock(bundlesToConsider, sbundlesToConsider, pending) + case ALGO_GREEDY_MULTISNAP: + // For greedy multi-snap builder, set algorithm configuration to default values, + // except DropRevertibleTxOnErr which is passed in from worker config + algoConf := &algorithmConfig{ + DropRevertibleTxOnErr: w.config.DiscardRevertibleTxOnErr, + EnforceProfit: defaultAlgorithmConfig.EnforceProfit, + ProfitThresholdPercent: defaultAlgorithmConfig.ProfitThresholdPercent, } + builder := newGreedyMultiSnapBuilder( + w.chain, w.chainConfig, algoConf, w.blockList, env, + w.config.BuilderTxSigningKey, interrupt, + ) newEnv, blockBundles, usedSbundle = builder.buildBlock(bundlesToConsider, sbundlesToConsider, pending) case ALGO_GREEDY: fallthrough default: // For default greedy builder, set algorithm configuration to default values, - // except DropRevertibleTxOnErr and EnableMultiTxSnap which are passed in from worker config + // except DropRevertibleTxOnErr which is passed in from 
worker config algoConf := &algorithmConfig{ - EnableMultiTxSnap: w.config.EnableMultiTransactionSnapshot, DropRevertibleTxOnErr: w.config.DiscardRevertibleTxOnErr, EnforceProfit: defaultAlgorithmConfig.EnforceProfit, ProfitThresholdPercent: defaultAlgorithmConfig.ProfitThresholdPercent, } - builder, err := newGreedyBuilder( + builder := newGreedyBuilder( w.chain, w.chainConfig, algoConf, w.blockList, env, w.config.BuilderTxSigningKey, interrupt, ) - if err != nil { - return nil, nil, nil, nil, err - } - newEnv, blockBundles, usedSbundle = builder.buildBlock(bundlesToConsider, sbundlesToConsider, pending) } From 811b2942456a997d2e263707559d252d0c69967b Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 23 Aug 2023 17:27:29 -0500 Subject: [PATCH 41/46] Update env changes to reduce redundancy and make control flow easier to follow --- miner/env_changes.go | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/miner/env_changes.go b/miner/env_changes.go index 09dc20ee91..3af63f0399 100644 --- a/miner/env_changes.go +++ b/miner/env_changes.go @@ -142,30 +142,23 @@ func (c *envChanges) commitBundle(bundle *types.SimulatedBundle, chData chainDat } receipt, _, err := c.commitTx(tx, chData) - if err != nil { + switch { + case err != nil: isRevertibleTx := bundle.OriginalBundle.RevertingHash(txHash) // if drop enabled, and revertible tx has error on commit, we skip the transaction and continue with next one if algoConf.DropRevertibleTxOnErr && isRevertibleTx { log.Trace("Found error on commit for revertible tx, but discard on err is enabled so skipping.", "tx", txHash, "err", err) - continue + } else { + bundleErr = err } - log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) - bundleErr = err - break - } - - if bundleErr != nil { - break - } - - if receipt != nil { + case receipt != nil: if receipt.Status == types.ReceiptStatusFailed && !bundle.OriginalBundle.RevertingHash(txHash) { // if transaction reverted and isn't specified as reverting hash, return error log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) bundleErr = errors.New("bundle tx revert") } - } else { + case receipt == nil && err == nil: // NOTE: The expectation is that a receipt is only nil if an error occurred. // If there is no error but receipt is nil, there is likely a programming error. 
bundleErr = errors.New("invalid receipt when no error occurred") From 49157e86fe587a62cac01b4966ac0d3876c3fba1 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 23 Aug 2023 17:54:57 -0500 Subject: [PATCH 42/46] Fix linter error --- miner/algo_common.go | 1 - 1 file changed, 1 deletion(-) diff --git a/miner/algo_common.go b/miner/algo_common.go index e01ea6604a..70bf3fe65f 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -46,7 +46,6 @@ var emptyCodeHash = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca var ( ErrMevGasPriceNotSet = errors.New("mev gas price not set") errInterrupt = errors.New("miner worker interrupted") - errNoAlgorithmConfig = errors.New("no algorithm configuration specified") errNoPrivateKey = errors.New("no private key provided") ) From 03dd29c8d0ea6a409d65a9e4c7ca08f720c0ffbd Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 23 Aug 2023 18:01:08 -0500 Subject: [PATCH 43/46] Remove debug validation --- builder/builder.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/builder/builder.go b/builder/builder.go index ba174317b1..44201bcdeb 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -324,11 +324,6 @@ func (b *Builder) submitCapellaBlock(block *types.Block, blockValue *big.Int, or log.Error("could not validate block for capella", "err", err) } } else { - err = b.validator.ValidateBuilderSubmissionV2(&blockvalidation.BuilderBlockValidationRequestV2{SubmitBlockRequest: blockSubmitReq, RegisteredGasLimit: vd.GasLimit}) - if err != nil { - log.Error("could not validate block for capella", "err", err) - return err - } go b.ds.ConsumeBuiltBlock(block, blockValue, ordersClosedAt, sealedAt, commitedBundles, allBundles, usedSbundles, &blockBidMsg) err = b.relay.SubmitBlockCapella(&blockSubmitReq, vd) if err != nil { From f2c53206a56094d42db5fa33701a4342eec27da6 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Fri, 25 Aug 2023 11:16:16 -0500 Subject: [PATCH 44/46] Update comments, add touch change to state fuzz test smart contract --- core/state/multi_tx_snapshot.go | 7 ++++- core/state/statedb.go | 7 +---- miner/algo_state_test.go | 48 ++++++++++++++++++--------------- 3 files changed, 33 insertions(+), 29 deletions(-) diff --git a/core/state/multi_tx_snapshot.go b/core/state/multi_tx_snapshot.go index 78c4ad92e0..f70842a98f 100644 --- a/core/state/multi_tx_snapshot.go +++ b/core/state/multi_tx_snapshot.go @@ -110,6 +110,10 @@ func (s MultiTxSnapshot) Copy() MultiTxSnapshot { newSnapshot.accountNotDirty[address] = struct{}{} } + for address := range s.touchedAccounts { + newSnapshot.touchedAccounts[address] = struct{}{} + } + return newSnapshot } @@ -167,7 +171,8 @@ func (s *MultiTxSnapshot) Equal(other *MultiTxSnapshot) bool { reflect.DeepEqual(s.accountSuicided, other.accountSuicided) && reflect.DeepEqual(s.accountDeleted, other.accountDeleted) && reflect.DeepEqual(s.accountNotPending, other.accountNotPending) && - reflect.DeepEqual(s.accountNotDirty, other.accountNotDirty) + reflect.DeepEqual(s.accountNotDirty, other.accountNotDirty) && + reflect.DeepEqual(s.touchedAccounts, other.touchedAccounts) } // updateFromJournal updates the snapshot with the changes from the journal. 
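All of the multi-snapshot builders in this series drive the snapshot stack through the same discipline: open a multi-transaction snapshot before applying an order, revert it if the order fails, and commit it if the order succeeds. Below is a minimal sketch of that pattern, assuming only the StateDB methods that appear in these diffs (NewMultiTxSnapshot, MultiTxSnapshotRevert, MultiTxSnapshotCommit); the applyWithSnapshot helper and its applyOrder callback are hypothetical stand-ins for commitTx/commitBundle/CommitSBundle and are not part of this patch:

package miner

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/state"
)

// applyWithSnapshot wraps one order application in a multi-transaction snapshot
// so that a failed order leaves the state exactly as it was beforehand.
func applyWithSnapshot(statedb *state.StateDB, applyOrder func() error) error {
	if err := statedb.NewMultiTxSnapshot(); err != nil {
		return fmt.Errorf("open multi-tx snapshot: %w", err)
	}
	if err := applyOrder(); err != nil {
		// The order failed: roll back every change made since the snapshot was opened.
		if revertErr := statedb.MultiTxSnapshotRevert(); revertErr != nil {
			return fmt.Errorf("revert multi-tx snapshot: %w", revertErr)
		}
		return err
	}
	// The order succeeded: fold the snapshot away and keep its changes.
	return statedb.MultiTxSnapshotCommit()
}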
diff --git a/core/state/statedb.go b/core/state/statedb.go index 0cc41cd630..8ec9de32cb 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -717,12 +717,7 @@ func (s *StateDB) Copy() *StateDB { journal: newJournal(), hasher: crypto.NewKeccakState(), } - // Initialize new multi-transaction snapshot stack for the copied state - // NOTE(wazzymandias): We avoid copying the snapshot stack from the original state - // because it may contain snapshots that are not valid for the copied state. - //if s.multiTxSnapshotStack.Size() > 0 { - // panic("cannot copy state with active multi-transaction snapshot stack") - //} + // Initialize copy of multi-transaction snapshot stack for the copied state state.multiTxSnapshotStack = s.multiTxSnapshotStack.Copy(state) // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { diff --git a/miner/algo_state_test.go b/miner/algo_state_test.go index 29eb4d3d9e..636d13553f 100644 --- a/miner/algo_state_test.go +++ b/miner/algo_state_test.go @@ -58,6 +58,10 @@ contract StateFuzzTest { function changeStorage(bytes32 key, bytes memory newValue) public { storageData[key] = newValue; } + + function touchContract(address contractAddress) public view returns (bytes32) { + return extcodehash(contractAddress); + } } ` @@ -82,24 +86,25 @@ func changeBalanceFuzzTestContract(nonce uint64, to, address common.Address, new }, nil } -func resetObjectFuzzTestContract(nonce uint64, address common.Address, key [32]byte) (types.TxData, error) { +func changeStorageFuzzTestContract(chainID *big.Int, nonce uint64, to common.Address, key [32]byte, value []byte) (types.TxData, error) { abi, err := StatefuzztestMetaData.GetAbi() if err != nil { return nil, err } - data, err := abi.Pack("resetObject", key) + data, err := abi.Pack("changeStorage", key, value) if err != nil { return nil, err } - return &types.LegacyTx{ - Nonce: nonce, - GasPrice: big.NewInt(1), - Gas: 10_000_000, - To: (*common.Address)(address[:]), - Value: big.NewInt(0), - Data: data, + return &types.DynamicFeeTx{ + ChainID: chainID, + Nonce: nonce, + Gas: 100_000, + GasFeeCap: big.NewInt(1), + To: (*common.Address)(to[:]), + Value: big.NewInt(0), + Data: data, }, nil } @@ -125,35 +130,34 @@ func createObjectFuzzTestContract(chainID *big.Int, nonce uint64, to common.Addr }, nil } -func selfDestructFuzzTestContract(chainID *big.Int, nonce uint64, to common.Address) (types.TxData, error) { +func resetObjectFuzzTestContract(nonce uint64, address common.Address, key [32]byte) (types.TxData, error) { abi, err := StatefuzztestMetaData.GetAbi() if err != nil { return nil, err } - data, err := abi.Pack("selfDestruct") + data, err := abi.Pack("resetObject", key) if err != nil { return nil, err } - return &types.DynamicFeeTx{ - ChainID: chainID, - Nonce: nonce, - Gas: 500_000, - GasFeeCap: big.NewInt(1), - To: (*common.Address)(to[:]), - Value: big.NewInt(0), - Data: data, + return &types.LegacyTx{ + Nonce: nonce, + GasPrice: big.NewInt(1), + Gas: 10_000_000, + To: (*common.Address)(address[:]), + Value: big.NewInt(0), + Data: data, }, nil } -func changeStorageFuzzTestContract(chainID *big.Int, nonce uint64, to common.Address, key [32]byte, value []byte) (types.TxData, error) { +func selfDestructFuzzTestContract(chainID *big.Int, nonce uint64, to common.Address) (types.TxData, error) { abi, err := StatefuzztestMetaData.GetAbi() if err != nil { return nil, err } - data, err := abi.Pack("changeStorage", key, value) + data, err := abi.Pack("selfDestruct") if err != nil { return nil, err } @@ -161,7 
+165,7 @@ func changeStorageFuzzTestContract(chainID *big.Int, nonce uint64, to common.Add return &types.DynamicFeeTx{ ChainID: chainID, Nonce: nonce, - Gas: 100_000, + Gas: 500_000, GasFeeCap: big.NewInt(1), To: (*common.Address)(to[:]), Value: big.NewInt(0), From ef00a1f9325ca614c028c6c930f54f34bfa81d82 Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Thu, 7 Sep 2023 21:24:08 -0500 Subject: [PATCH 45/46] Add fuzz tests for transient storage and account touch operations --- miner/algo_common_test.go | 7 +- miner/algo_state_test.go | 193 ++++++++++++++++-- miner/contract_simulator_test.go | 248 ++++++++++++----------- miner/env_changes_test.go | 3 +- miner/state_fuzz_test_abigen_bindings.go | 56 ++++- miner/testdata/state_fuzz_test.abi | 2 +- 6 files changed, 366 insertions(+), 143 deletions(-) diff --git a/miner/algo_common_test.go b/miner/algo_common_test.go index e3372626f0..0ac114ebb2 100644 --- a/miner/algo_common_test.go +++ b/miner/algo_common_test.go @@ -41,7 +41,12 @@ type signerList struct { } func simulateBundle(env *environment, bundle types.MevBundle, chData chainData, interrupt *int32) (types.SimulatedBundle, error) { - stateDB := env.state.Copy() + // NOTE(wazzymandias): We are referencing the environment StateDB here - notice that it is not a copy. + // For test scenarios where bundles depend on previous bundle transactions to succeed, it is + // necessary to reference the same StateDB in order to avoid nonce too high errors. + // As a result, it is recommended that the caller make a copy before invoking this function, in order to + // ensure transaction serializability across bundles. + stateDB := env.state gasPool := new(core.GasPool).AddGas(env.header.GasLimit) var totalGasUsed uint64 diff --git a/miner/algo_state_test.go b/miner/algo_state_test.go index 636d13553f..3f51430d71 100644 --- a/miner/algo_state_test.go +++ b/miner/algo_state_test.go @@ -3,12 +3,14 @@ package miner import ( "bytes" "context" + "crypto/ecdsa" "crypto/rand" "encoding/hex" "fmt" "math/big" mathrand "math/rand" "testing" + "time" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" @@ -24,19 +26,26 @@ import ( // NOTE(wazzymandias): Below is a FuzzTest contract written in Solidity and shown here as reference code // for the generated abi and bytecode used for testing. -// The generated abi can be found in the `testdata` directory. +// The generated abi can be found in the `testdata` directory in `state_fuzz_test.abi`. 
// The abi, bytecode, and Go bindings were generated using the following commands: // - docker run -v ${STATE_FUZZ_TEST_CONTRACT_DIRECTORY}:/sources // ethereum/solc:0.8.19 -o /sources/output --abi --bin /sources/StateFuzzTest.sol // - go run ./cmd/abigen/ --bin ${TARGET_STATE_FUZZ_TEST_BIN_PATH} --abi ${TARGET_STATE_FUZZ_TEST_ABI_PATH} // --pkg statefuzztest --out=state_fuzz_test_abigen_bindings.go const StateFuzzTestSolidity = ` +// SPDX-License-Identifier: MIT pragma solidity 0.8.19; contract StateFuzzTest { mapping(address => uint256) public balances; mapping(bytes32 => bytes) public storageData; mapping(address => bool) public isSelfDestructed; + mapping(address => uint256) private refunds; + + function addThenWithdrawRefund(uint256 amount) external payable { + refunds[msg.sender] += amount; + payable(msg.sender).transfer(amount); + } function createObject(bytes32 key, bytes memory value) public { storageData[key] = value; @@ -59,9 +68,12 @@ contract StateFuzzTest { storageData[key] = newValue; } - function touchContract(address contractAddress) public view returns (bytes32) { - return extcodehash(contractAddress); - } + function touchContract(address contractAddress) public view returns (bytes32 codeHash) { + assembly { + codeHash := extcodehash(contractAddress) + } + return codeHash; + } } ` @@ -173,6 +185,50 @@ func selfDestructFuzzTestContract(chainID *big.Int, nonce uint64, to common.Addr }, nil } +func touchAccountFuzzTestContract(chainID *big.Int, nonce uint64, address common.Address) (types.TxData, error) { + abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("touchContract", address) + if err != nil { + return nil, err + } + + return &types.DynamicFeeTx{ + ChainID: chainID, + Nonce: nonce, + Gas: 100_000, + GasFeeCap: big.NewInt(1), + To: (*common.Address)(address[:]), + Value: big.NewInt(0), + Data: data, + }, nil +} + +func addThenWithdrawRefundFuzzTestContract(chainID *big.Int, nonce uint64, to common.Address, value *big.Int) (types.TxData, error) { + abi, err := StatefuzztestMetaData.GetAbi() + if err != nil { + return nil, err + } + + data, err := abi.Pack("addThenWithdrawRefund", value) + if err != nil { + return nil, err + } + + return &types.DynamicFeeTx{ + ChainID: chainID, + Nonce: nonce, + Gas: 400_000, + GasFeeCap: big.NewInt(1), + To: (*common.Address)(to[:]), + Value: value, + Data: data, + }, nil +} + const ( Baseline = 0 SingleSnapshot = 1 @@ -437,7 +493,8 @@ func TestStateComparisons(t *testing.T) { BlockNumber: header.Number, } - simBundle, err := simulateBundle(env, mevBundle, chData, nil) + envCopy := env.copy() + simBundle, err := simulateBundle(envCopy, mevBundle, chData, nil) require.NoError(t, err, "can't simulate bundle: %v", err) switch i { @@ -654,6 +711,7 @@ func TestBundles(t *testing.T) { for tcIdx, tc := range testContexts { backend := simulations[tcIdx] + // deploy fuzz test smart contract across all the account addresses we wish to test t.Run(fmt.Sprintf("%s-create-object", tc.Name), func(t *testing.T) { signers := tc.signers for signerIdx, pk := range signers.signers { @@ -707,14 +765,41 @@ func TestBundles(t *testing.T) { case Baseline: actualReceipt, _, err = tc.envDiff.commitTx(actualTx, tc.chainData) tc.envDiff.applyToBaseEnv() + signer := tc.envDiff.baseEnvironment.signer + from, senderErr := types.Sender(signer, actualTx) + require.NoError(t, senderErr) + + if err == nil { + expectedNonce := actualTx.Nonce() + 1 + actualNonce := tc.envDiff.baseEnvironment.state.GetNonce(from) + 
require.Equal(t, expectedNonce, actualNonce) + } else { + expectedNonce := actualTx.Nonce() - 1 + actualNonce := tc.envDiff.baseEnvironment.state.GetNonce(from) + require.Equal(t, expectedNonce, actualNonce) + } case SingleSnapshot: err = tc.changes.env.state.NewMultiTxSnapshot() require.NoError(t, err) - actualReceipt, _, err = tc.changes.commitTx(actualTx, tc.chainData) + var commitErr error + actualReceipt, _, commitErr = tc.changes.commitTx(actualTx, tc.chainData) require.NoError(t, err) err = tc.changes.apply() + + signer := tc.changes.env.signer + from, senderErr := types.Sender(signer, actualTx) + require.NoError(t, senderErr) + if commitErr == nil { + expectedNonce := actualTx.Nonce() + 1 + actualNonce := tc.changes.env.state.GetNonce(from) + require.Equal(t, expectedNonce, actualNonce) + } else { + expectedNonce := actualTx.Nonce() - 1 + actualNonce := tc.changes.env.state.GetNonce(from) + require.Equal(t, expectedNonce, actualNonce) + } case MultiSnapshot: err = tc.changes.env.state.NewMultiTxSnapshot() require.NoError(t, err) @@ -722,13 +807,27 @@ func TestBundles(t *testing.T) { err = tc.changes.env.state.NewMultiTxSnapshot() require.NoError(t, err) - actualReceipt, _, err = tc.changes.commitTx(actualTx, tc.chainData) - require.NoError(t, err) + var commitErr error + actualReceipt, _, commitErr = tc.changes.commitTx(actualTx, tc.chainData) + require.NoError(t, commitErr) err = tc.changes.apply() require.NoError(t, err) err = tc.changes.env.state.MultiTxSnapshotCommit() + + signer := tc.changes.env.signer + from, senderErr := types.Sender(signer, actualTx) + require.NoError(t, senderErr) + if commitErr == nil { + expectedNonce := actualTx.Nonce() + 1 + actualNonce := tc.changes.env.state.GetNonce(from) + require.Equal(t, expectedNonce, actualNonce) + } else { + expectedNonce := actualTx.Nonce() - 1 + actualNonce := tc.changes.env.state.GetNonce(from) + require.Equal(t, expectedNonce, actualNonce) + } } require.NoError(t, err) @@ -751,6 +850,8 @@ func TestBundles(t *testing.T) { // - self-destruct // - reset object // - change storage + // - change transient storage + // - touch account type TransactionOperation int const ( ChangeBalance TransactionOperation = iota @@ -758,21 +859,46 @@ func TestBundles(t *testing.T) { SelfDestruct ResetObject ChangeStorage + ChangeTransientStorage + TouchAccount ) + operations := []TransactionOperation{ + ChangeBalance, + CreateObject, + SelfDestruct, + ResetObject, + ChangeStorage, + ChangeTransientStorage, + TouchAccount, + } const ( - bundleCount = 3 - bundleSize = 10 + bundleCount = 10 + bundleSize = 100 ) + // NOTE(wazzymandias): We make a copy of the signer list before we craft the bundles of transactions. + // The reason is that the pre-bundle signer list will be used to simulate the bundles. + // Using the actual signer list will cause nonce mismatch errors, since we increment nonce + // as we craft the bundles of transactions. 
+ var preBundleSigners = signerList{ + config: testContexts[0].signers.config, + addresses: make([]common.Address, len(testContexts[0].signers.addresses)), + signers: make([]*ecdsa.PrivateKey, len(testContexts[0].signers.signers)), + nonces: make([]uint64, len(testContexts[0].signers.nonces)), + } + copy(preBundleSigners.addresses, testContexts[0].signers.addresses) + copy(preBundleSigners.signers, testContexts[0].signers.signers) + copy(preBundleSigners.nonces, testContexts[0].signers.nonces) + bundles := [bundleCount]types.MevBundle{} for bundleIdx := 0; bundleIdx < bundleCount; bundleIdx++ { transactions := [bundleSize]*types.Transaction{} for txIdx := 0; txIdx < bundleSize; txIdx++ { var ( - // pick a random integer that represents one of the transactions we will create - n = mathrand.Intn(5) - s = testContexts[0].signers - chainID = s.config.ChainID + // pick a random operation that represents one of the transactions we will create + randomOperation = operations[mathrand.Intn(len(operations))] + s = testContexts[0].signers + chainID = s.config.ChainID // choose a random To Address index toAddressRandomIdx = mathrand.Intn(len(s.signers)) // reference the correct nonce for the associated To Address @@ -782,7 +908,7 @@ func TestBundles(t *testing.T) { txData types.TxData err error ) - switch TransactionOperation(n) { + switch randomOperation { case ChangeBalance: // change balance balanceAddressRandomIdx := mathrand.Intn(len(s.signers)) balanceAddress := s.addresses[balanceAddressRandomIdx] @@ -826,6 +952,18 @@ func TestBundles(t *testing.T) { require.NoError(t, err) txData, err = changeStorageFuzzTestContract(chainID, nonce, fuzzContractAddress, changeStorageObjectKey, value[:]) + + case ChangeTransientStorage: // change transient storage + value := new(big.Int).Rand( + mathrand.New(mathrand.NewSource(time.Now().UnixNano())), big.NewInt(1000000), + ) + require.NoError(t, err) + + txData, err = addThenWithdrawRefundFuzzTestContract(chainID, nonce, toAddress, value) + case TouchAccount: // touch random account + fuzzContractAddress := variantFuzzTestAddresses[0][toAddressRandomIdx] + + txData, err = touchAccountFuzzTestContract(chainID, nonce, fuzzContractAddress) } require.NotNilf(t, txData, "txData is nil for bundle %d, tx %d", bundleIdx, txIdx) require.NoError(t, err) @@ -839,8 +977,13 @@ func TestBundles(t *testing.T) { multi := testContexts[MultiSnapshot] base.signers.nonces[toAddressRandomIdx]++ + testContexts[Baseline].signers = base.signers + single.signers.nonces[toAddressRandomIdx]++ + testContexts[SingleSnapshot].signers = single.signers + multi.signers.nonces[toAddressRandomIdx]++ + testContexts[MultiSnapshot].signers = multi.signers } bundles[bundleIdx] = types.MevBundle{ @@ -859,36 +1002,44 @@ func TestBundles(t *testing.T) { } } + // commit bundles to each test context, with intermittent bundle failures + const bundleFailEveryN = 2 var ( + base = testContexts[0] commitErrMap = map[int]error{ Baseline: nil, SingleSnapshot: nil, MultiSnapshot: nil, } + genesisAlloc = genGenesisAlloc(preBundleSigners, + []common.Address{payProxyAddress, logContractAddress}, [][]byte{payProxyCode, logContractCode}) ) - - base := testContexts[0] - genesisAlloc := genGenesisAlloc(base.signers, - []common.Address{payProxyAddress, logContractAddress}, [][]byte{payProxyCode, logContractCode}) simulatedBundleList, err := simulateBundles(base.chainData.chainConfig, types.CopyHeader(base.env.header), genesisAlloc, bundles[:]) require.NoError(t, err) + require.Len(t, simulatedBundleList, len(bundles)) 
// commit bundles one by one to each test context to make sure each bundle result is deterministic // apply all to the underlying environment at the end - for _, b := range simulatedBundleList { + for bundleIdx, b := range simulatedBundleList { algoConf := defaultAlgorithmConfig algoConf.EnforceProfit = true + shouldRevert := bundleFailEveryN != 0 && bundleIdx%bundleFailEveryN == 0 for tcIdx, tc := range testContexts { var commitErr error switch tcIdx { case Baseline: + // We don't commit bundle to Baseline if it's meant to fail, in order to ensure that the state + // for SingleSnapshot and MultiSnapshot matches on revert to the baseline state + if shouldRevert { + break + } commitErr = tc.envDiff.commitBundle(&b, tc.chainData, nil, algoConf) case SingleSnapshot, MultiSnapshot: commitErr = tc.changes.commitBundle(&b, tc.chainData, algoConf) - if commitErrMap[Baseline] != nil { + if commitErrMap[Baseline] != nil || shouldRevert { require.NoError(t, tc.changes.env.state.MultiTxSnapshotRevert()) } else { require.NoError(t, tc.changes.env.state.MultiTxSnapshotCommit()) diff --git a/miner/contract_simulator_test.go b/miner/contract_simulator_test.go index 973053fb83..c14c83983b 100644 --- a/miner/contract_simulator_test.go +++ b/miner/contract_simulator_test.go @@ -110,153 +110,167 @@ func parseAbi(t *testing.T, filename string) *abi.ABI { func TestSimulatorState(t *testing.T) { // enableLogging() - t.Cleanup(func() { - testConfig.AlgoType = ALGO_MEV_GETH - testConfig.BuilderTxSigningKey = nil - testConfig.Etherbase = common.Address{} - }) - - testConfig.AlgoType = ALGO_GREEDY - var err error - testConfig.BuilderTxSigningKey, err = crypto.GenerateKey() - require.NoError(t, err) - testConfig.Etherbase = crypto.PubkeyToAddress(testConfig.BuilderTxSigningKey.PublicKey) - - db := rawdb.NewMemoryDatabase() - chainConfig := *params.AllEthashProtocolChanges - chainConfig.ChainID = big.NewInt(31337) - engine := ethash.NewFaker() - - // (not needed I think) chainConfig.LondonBlock = big.NewInt(0) - deployerKey, err := crypto.ToECDSA(hexutil.MustDecode("0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80")) - deployerAddress := crypto.PubkeyToAddress(deployerKey.PublicKey) - deployerTestAddress := common.HexToAddress("0x70997970C51812dc3A010C7d01b50e0d17dc79C8") - alloc := core.GenesisAlloc{deployerAddress: {Balance: new(big.Int).Mul(big.NewInt(10000), bigEther)}, deployerTestAddress: {Balance: new(big.Int).Mul(big.NewInt(10000), bigEther)}} - - testParticipants := NewTestParticipants(5, 5) - alloc = testParticipants.AppendToGenesisAlloc(alloc) - - var genesis = core.Genesis{ - Config: &chainConfig, - Alloc: alloc, - GasLimit: 30000000, - } + algorithmTable := []AlgoType{ALGO_GREEDY, ALGO_GREEDY_BUCKETS, ALGO_GREEDY_MULTISNAP, ALGO_GREEDY_BUCKETS_MULTISNAP} + for _, algo := range algorithmTable { + t.Run(algo.String(), func(t *testing.T) { + t.Cleanup(func() { + testConfig.AlgoType = ALGO_MEV_GETH + testConfig.BuilderTxSigningKey = nil + testConfig.Etherbase = common.Address{} + }) + + testConfig.AlgoType = algo + var err error + testConfig.BuilderTxSigningKey, err = crypto.GenerateKey() + require.NoError(t, err) + testConfig.Etherbase = crypto.PubkeyToAddress(testConfig.BuilderTxSigningKey.PublicKey) + + db := rawdb.NewMemoryDatabase() + defer func() { + require.NoError(t, db.Close()) + }() + + chainConfig := *params.AllEthashProtocolChanges + chainConfig.ChainID = big.NewInt(31337) + engine := ethash.NewFaker() + defer func() { + require.NoError(t, engine.Close()) + }() + + // (not 
needed I think) chainConfig.LondonBlock = big.NewInt(0) + deployerKey, err := crypto.ToECDSA(hexutil.MustDecode("0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80")) + require.NoError(t, err) - w, b := newTestWorkerGenesis(t, &chainConfig, engine, db, genesis, 0) - w.setEtherbase(crypto.PubkeyToAddress(testConfig.BuilderTxSigningKey.PublicKey)) + deployerAddress := crypto.PubkeyToAddress(deployerKey.PublicKey) + deployerTestAddress := common.HexToAddress("0x70997970C51812dc3A010C7d01b50e0d17dc79C8") + alloc := core.GenesisAlloc{deployerAddress: {Balance: new(big.Int).Mul(big.NewInt(10000), bigEther)}, deployerTestAddress: {Balance: new(big.Int).Mul(big.NewInt(10000), bigEther)}} - simBackend := backends.NewSimulatedBackendChain(db, b.chain) + testParticipants := NewTestParticipants(5, 5) + alloc = testParticipants.AppendToGenesisAlloc(alloc) - univ2FactoryA := NewTContract(t, simBackend, "testdata/univ2factory.abi", univ2FactoryA_Address) - univ2FactoryB := NewTContract(t, simBackend, "testdata/univ2factory.abi", univ2FactoryB_Address) + var genesis = core.Genesis{ + Config: &chainConfig, + Alloc: alloc, + GasLimit: 30000000, + } - wethContract := NewTContract(t, simBackend, "testdata/weth.abi", wethAddress) - daiContract := NewTContract(t, simBackend, "testdata/dai.abi", daiAddress) - atomicSwapContract := NewTContract(t, simBackend, "testdata/swap.abi", atomicSwapAddress) + w, b := newTestWorkerGenesis(t, &chainConfig, engine, db, genesis, 0) + w.setEtherbase(crypto.PubkeyToAddress(testConfig.BuilderTxSigningKey.PublicKey)) - testAddress1Key, _ := crypto.GenerateKey() - testAddress1 := crypto.PubkeyToAddress(testAddress1Key.PublicKey) + simBackend := backends.NewSimulatedBackendChain(db, b.chain) - rand.New(rand.NewSource(10)) + univ2FactoryA := NewTContract(t, simBackend, "testdata/univ2factory.abi", univ2FactoryA_Address) + univ2FactoryB := NewTContract(t, simBackend, "testdata/univ2factory.abi", univ2FactoryB_Address) - deploymentTxs := deployAllContracts(t, deployerKey, b.chain.CurrentHeader().BaseFee) + wethContract := NewTContract(t, simBackend, "testdata/weth.abi", wethAddress) + daiContract := NewTContract(t, simBackend, "testdata/dai.abi", daiAddress) + atomicSwapContract := NewTContract(t, simBackend, "testdata/swap.abi", atomicSwapAddress) - getBaseFee := func() *big.Int { - return new(big.Int).Mul(big.NewInt(2), b.chain.CurrentHeader().BaseFee) - } + testAddress1Key, _ := crypto.GenerateKey() + testAddress1 := crypto.PubkeyToAddress(testAddress1Key.PublicKey) - nonceModFor := big.NewInt(0) - nonceMod := make(map[common.Address]uint64) - getNonce := func(addr common.Address) uint64 { - if nonceModFor.Cmp(b.chain.CurrentHeader().Number) != 0 { - nonceMod = make(map[common.Address]uint64) - nonceModFor.Set(b.chain.CurrentHeader().Number) - } - - cm := nonceMod[addr] - nonceMod[addr] = cm + 1 - return b.txPool.Nonce(addr) + cm - } + rand.New(rand.NewSource(10)) - prepareContractCallTx := func(contract tConctract, signerKey *ecdsa.PrivateKey, method string, args ...interface{}) *types.Transaction { - callData, err := contract.abi.Pack(method, args...) - require.NoError(t, err) + deploymentTxs := deployAllContracts(t, deployerKey, b.chain.CurrentHeader().BaseFee) - fromAddress := crypto.PubkeyToAddress(signerKey.PublicKey) + getBaseFee := func() *big.Int { + return new(big.Int).Mul(big.NewInt(2), b.chain.CurrentHeader().BaseFee) + } - callRes, err := contract.doCall(fromAddress, method, args...) 
- if err != nil { - t.Errorf("Prepared smart contract call error %s with result %s", err.Error(), string(callRes)) - } + nonceModFor := big.NewInt(0) + nonceMod := make(map[common.Address]uint64) + getNonce := func(addr common.Address) uint64 { + if nonceModFor.Cmp(b.chain.CurrentHeader().Number) != 0 { + nonceMod = make(map[common.Address]uint64) + nonceModFor.Set(b.chain.CurrentHeader().Number) + } - tx, err := types.SignTx(types.NewTransaction(getNonce(fromAddress), contract.address, new(big.Int), 9000000, getBaseFee(), callData), types.HomesteadSigner{}, signerKey) - require.NoError(t, err) + cm := nonceMod[addr] + nonceMod[addr] = cm + 1 + return b.txPool.Nonce(addr) + cm + } - return tx - } + prepareContractCallTx := func(contract tConctract, signerKey *ecdsa.PrivateKey, method string, args ...interface{}) *types.Transaction { + callData, err := contract.abi.Pack(method, args...) + require.NoError(t, err) - buildBlock := func(txs []*types.Transaction, requireTx int) *types.Block { - errs := b.txPool.AddLocals(txs) - for _, err := range errs { - require.NoError(t, err) - } + fromAddress := crypto.PubkeyToAddress(signerKey.PublicKey) - block, _, err := w.getSealingBlock(b.chain.CurrentBlock().Hash(), b.chain.CurrentHeader().Time+12, testAddress1, 0, common.Hash{}, nil, false, nil) - require.NoError(t, err) - require.NotNil(t, block) - if requireTx != -1 { - require.Equal(t, requireTx, len(block.Transactions())) - } - _, err = b.chain.InsertChain([]*types.Block{block}) - require.NoError(t, err) - return block - } + callRes, err := contract.doCall(fromAddress, method, args...) + if err != nil { + t.Errorf("Prepared smart contract call error %s with result %s", err.Error(), string(callRes)) + } - buildBlock(deploymentTxs, len(deploymentTxs)+1) - require.Equal(t, uint64(18), b.txPool.Nonce(deployerAddress)) - require.Equal(t, uint64(3), b.txPool.Nonce(deployerTestAddress)) + tx, err := types.SignTx(types.NewTransaction(getNonce(fromAddress), contract.address, new(big.Int), 9000000, getBaseFee(), callData), types.HomesteadSigner{}, signerKey) + require.NoError(t, err) - // Mint tokens - require.NoError(t, err) + return tx + } - approveTxs := []*types.Transaction{} + buildBlock := func(txs []*types.Transaction, requireTx int) *types.Block { + errs := b.txPool.AddLocals(txs) + for _, err := range errs { + require.NoError(t, err) + } - adminApproveTxWeth := prepareContractCallTx(wethContract, deployerKey, "approve", atomicSwapContract.address, ethmath.MaxBig256) - approveTxs = append(approveTxs, adminApproveTxWeth) - adminApproveTxDai := prepareContractCallTx(daiContract, deployerKey, "approve", atomicSwapContract.address, ethmath.MaxBig256) - approveTxs = append(approveTxs, adminApproveTxDai) + block, _, err := w.getSealingBlock(b.chain.CurrentBlock().Hash(), b.chain.CurrentHeader().Time+12, testAddress1, 0, common.Hash{}, nil, false, nil) + require.NoError(t, err) + require.NotNil(t, block) + if requireTx != -1 { + require.Equal(t, requireTx, len(block.Transactions())) + } + _, err = b.chain.InsertChain([]*types.Block{block}) + require.NoError(t, err) + return block + } - for _, spender := range []TestParticipant{testParticipants.users[0], testParticipants.searchers[0]} { - mintTx := prepareContractCallTx(daiContract, deployerKey, "mint", spender.address, new(big.Int).Mul(bigEther, big.NewInt(50000))) - approveTxs = append(approveTxs, mintTx) + buildBlock(deploymentTxs, len(deploymentTxs)+1) + require.Equal(t, uint64(18), b.txPool.Nonce(deployerAddress)) + require.Equal(t, uint64(3), 
b.txPool.Nonce(deployerTestAddress)) - depositTx, err := types.SignTx(types.NewTransaction(getNonce(spender.address), wethContract.address, new(big.Int).Mul(bigEther, big.NewInt(1000)), 9000000, getBaseFee(), hexutil.MustDecode("0xd0e30db0")), types.HomesteadSigner{}, spender.key) - require.NoError(t, err) - approveTxs = append(approveTxs, depositTx) + // Mint tokens + require.NoError(t, err) - spenderApproveTxWeth := prepareContractCallTx(wethContract, spender.key, "approve", atomicSwapContract.address, ethmath.MaxBig256) - approveTxs = append(approveTxs, spenderApproveTxWeth) + approveTxs := []*types.Transaction{} - spenderApproveTxDai := prepareContractCallTx(daiContract, spender.key, "approve", atomicSwapContract.address, ethmath.MaxBig256) - approveTxs = append(approveTxs, spenderApproveTxDai) - } + adminApproveTxWeth := prepareContractCallTx(wethContract, deployerKey, "approve", atomicSwapContract.address, ethmath.MaxBig256) + approveTxs = append(approveTxs, adminApproveTxWeth) + adminApproveTxDai := prepareContractCallTx(daiContract, deployerKey, "approve", atomicSwapContract.address, ethmath.MaxBig256) + approveTxs = append(approveTxs, adminApproveTxDai) - buildBlock(approveTxs, len(approveTxs)+1) + for _, spender := range []TestParticipant{testParticipants.users[0], testParticipants.searchers[0]} { + mintTx := prepareContractCallTx(daiContract, deployerKey, "mint", spender.address, new(big.Int).Mul(bigEther, big.NewInt(50000))) + approveTxs = append(approveTxs, mintTx) - amtIn := new(big.Int).Mul(bigEther, big.NewInt(50)) + depositTx, err := types.SignTx(types.NewTransaction(getNonce(spender.address), wethContract.address, new(big.Int).Mul(bigEther, big.NewInt(1000)), 9000000, getBaseFee(), hexutil.MustDecode("0xd0e30db0")), types.HomesteadSigner{}, spender.key) + require.NoError(t, err) + approveTxs = append(approveTxs, depositTx) - userSwapTx := prepareContractCallTx(atomicSwapContract, testParticipants.users[0].key, "swap", []common.Address{wethContract.address, daiContract.address}, amtIn, univ2FactoryA.address, testParticipants.users[0].address, false) + spenderApproveTxWeth := prepareContractCallTx(wethContract, spender.key, "approve", atomicSwapContract.address, ethmath.MaxBig256) + approveTxs = append(approveTxs, spenderApproveTxWeth) - backrunTxData, err := atomicSwapContract.abi.Pack("backrun", daiContract.address, univ2FactoryB.address, univ2FactoryA.address, new(big.Int).Div(amtIn, big.NewInt(2))) - require.NoError(t, err) + spenderApproveTxDai := prepareContractCallTx(daiContract, spender.key, "approve", atomicSwapContract.address, ethmath.MaxBig256) + approveTxs = append(approveTxs, spenderApproveTxDai) + } - backrunTx, err := types.SignTx(types.NewTransaction(getNonce(testParticipants.searchers[0].address), atomicSwapContract.address, new(big.Int), 9000000, getBaseFee(), backrunTxData), types.HomesteadSigner{}, testParticipants.searchers[0].key) - require.NoError(t, err) + buildBlock(approveTxs, len(approveTxs)+1) + + amtIn := new(big.Int).Mul(bigEther, big.NewInt(50)) - targetBlockNumber := new(big.Int).Set(b.chain.CurrentHeader().Number) - targetBlockNumber.Add(targetBlockNumber, big.NewInt(1)) - b.txPool.AddMevBundle(types.Transactions{userSwapTx, backrunTx}, targetBlockNumber, uuid.UUID{}, common.Address{}, 0, 0, nil) - buildBlock([]*types.Transaction{}, 3) + userSwapTx := prepareContractCallTx(atomicSwapContract, testParticipants.users[0].key, "swap", []common.Address{wethContract.address, daiContract.address}, amtIn, univ2FactoryA.address, 
testParticipants.users[0].address, false) + + backrunTxData, err := atomicSwapContract.abi.Pack("backrun", daiContract.address, univ2FactoryB.address, univ2FactoryA.address, new(big.Int).Div(amtIn, big.NewInt(2))) + require.NoError(t, err) + + backrunTx, err := types.SignTx(types.NewTransaction(getNonce(testParticipants.searchers[0].address), atomicSwapContract.address, new(big.Int), 9000000, getBaseFee(), backrunTxData), types.HomesteadSigner{}, testParticipants.searchers[0].key) + require.NoError(t, err) + + targetBlockNumber := new(big.Int).Set(b.chain.CurrentHeader().Number) + targetBlockNumber.Add(targetBlockNumber, big.NewInt(1)) + b.txPool.AddMevBundle(types.Transactions{userSwapTx, backrunTx}, targetBlockNumber, uuid.UUID{}, common.Address{}, 0, 0, nil) + buildBlock([]*types.Transaction{}, 3) + }) + } } type tConctract struct { diff --git a/miner/env_changes_test.go b/miner/env_changes_test.go index decb179dca..6396c9b94a 100644 --- a/miner/env_changes_test.go +++ b/miner/env_changes_test.go @@ -71,7 +71,8 @@ func TestBundleCommitSnaps(t *testing.T) { BlockNumber: env.header.Number, } - simBundle, err := simulateBundle(env, bundle, chData, nil) + envCopy := env.copy() + simBundle, err := simulateBundle(envCopy, bundle, chData, nil) if err != nil { t.Fatal("Failed to simulate bundle", err) } diff --git a/miner/state_fuzz_test_abigen_bindings.go b/miner/state_fuzz_test_abigen_bindings.go index 1f2d076628..06a1a70039 100644 --- a/miner/state_fuzz_test_abigen_bindings.go +++ b/miner/state_fuzz_test_abigen_bindings.go @@ -31,8 +31,8 @@ var ( // StatefuzztestMetaData contains all meta data concerning the Statefuzztest contract. var StatefuzztestMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"balances\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"changeBalance\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"key\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"newValue\",\"type\":\"bytes\"}],\"name\":\"changeStorage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"key\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"value\",\"type\":\"bytes\"}],\"name\":\"createObject\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"isSelfDestructed\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"key\",\"type\":\"bytes32\"}],\"name\":\"resetObject\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"selfDestruct\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"storageData\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", - Bin: 
"0x608060405234801561001057600080fd5b50610b1f806100206000396000f3fe608060405234801561001057600080fd5b50600436106100885760003560e01c8063b0d50e381161005b578063b0d50e38146100ff578063c522de441461012f578063d58010651461015f578063f529d4481461017b57610088565b806327e235e31461008d5780637a5ae62e146100bd5780639cb8a26a146100d9578063a2601e0a146100e3575b600080fd5b6100a760048036038101906100a29190610462565b610197565b6040516100b491906104a8565b60405180910390f35b6100d760048036038101906100d291906104f9565b6101af565b005b6100e16101d1565b005b6100fd60048036038101906100f8919061066c565b610242565b005b61011960048036038101906101149190610462565b610267565b60405161012691906106e3565b60405180910390f35b610149600480360381019061014491906104f9565b610287565b604051610156919061077d565b60405180910390f35b6101796004803603810190610174919061066c565b610327565b005b610195600480360381019061019091906107cb565b61034c565b005b60006020528060005260406000206000915090505481565b6001600082815260200190815260200160002060006101ce9190610393565b50565b6001600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff0219169083151502179055503373ffffffffffffffffffffffffffffffffffffffff16ff5b806001600084815260200190815260200160002090816102629190610a17565b505050565b60026020528060005260406000206000915054906101000a900460ff1681565b600160205280600052604060002060009150905080546102a69061083a565b80601f01602080910402602001604051908101604052809291908181526020018280546102d29061083a565b801561031f5780601f106102f45761010080835404028352916020019161031f565b820191906000526020600020905b81548152906001019060200180831161030257829003601f168201915b505050505081565b806001600084815260200190815260200160002090816103479190610a17565b505050565b806000808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055505050565b50805461039f9061083a565b6000825580601f106103b157506103d0565b601f0160209004906000526020600020908101906103cf91906103d3565b5b50565b5b808211156103ec5760008160009055506001016103d4565b5090565b6000604051905090565b600080fd5b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b600061042f82610404565b9050919050565b61043f81610424565b811461044a57600080fd5b50565b60008135905061045c81610436565b92915050565b600060208284031215610478576104776103fa565b5b60006104868482850161044d565b91505092915050565b6000819050919050565b6104a28161048f565b82525050565b60006020820190506104bd6000830184610499565b92915050565b6000819050919050565b6104d6816104c3565b81146104e157600080fd5b50565b6000813590506104f3816104cd565b92915050565b60006020828403121561050f5761050e6103fa565b5b600061051d848285016104e4565b91505092915050565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b61057982610530565b810181811067ffffffffffffffff8211171561059857610597610541565b5b80604052505050565b60006105ab6103f0565b90506105b78282610570565b919050565b600067ffffffffffffffff8211156105d7576105d6610541565b5b6105e082610530565b9050602081019050919050565b82818337600083830152505050565b600061060f61060a846105bc565b6105a1565b90508281526020810184848401111561062b5761062a61052b565b5b6106368482856105ed565b509392505050565b600082601f83011261065357610652610526565b5b81356106638482602086016105fc565b91505092915050565b60008060408385031215610683576106826103fa565b5b6000610691858286016104e4565b925050602083013567ffffffffffffffff8111156106b2576106b16103ff565b5b6106be8582860161063e565b9150509250929050565b60008115159050
919050565b6106dd816106c8565b82525050565b60006020820190506106f860008301846106d4565b92915050565b600081519050919050565b600082825260208201905092915050565b60005b8381101561073857808201518184015260208101905061071d565b60008484015250505050565b600061074f826106fe565b6107598185610709565b935061076981856020860161071a565b61077281610530565b840191505092915050565b600060208201905081810360008301526107978184610744565b905092915050565b6107a88161048f565b81146107b357600080fd5b50565b6000813590506107c58161079f565b92915050565b600080604083850312156107e2576107e16103fa565b5b60006107f08582860161044d565b9250506020610801858286016107b6565b9150509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b6000600282049050600182168061085257607f821691505b6020821081036108655761086461080b565b5b50919050565b60008190508160005260206000209050919050565b60006020601f8301049050919050565b600082821b905092915050565b6000600883026108cd7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82610890565b6108d78683610890565b95508019841693508086168417925050509392505050565b6000819050919050565b600061091461090f61090a8461048f565b6108ef565b61048f565b9050919050565b6000819050919050565b61092e836108f9565b61094261093a8261091b565b84845461089d565b825550505050565b600090565b61095761094a565b610962818484610925565b505050565b5b818110156109865761097b60008261094f565b600181019050610968565b5050565b601f8211156109cb5761099c8161086b565b6109a584610880565b810160208510156109b4578190505b6109c86109c085610880565b830182610967565b50505b505050565b600082821c905092915050565b60006109ee600019846008026109d0565b1980831691505092915050565b6000610a0783836109dd565b9150826002028217905092915050565b610a20826106fe565b67ffffffffffffffff811115610a3957610a38610541565b5b610a43825461083a565b610a4e82828561098a565b600060209050601f831160018114610a815760008415610a6f578287015190505b610a7985826109fb565b865550610ae1565b601f198416610a8f8661086b565b60005b82811015610ab757848901518255600182019150602085019450602081019050610a92565b86831015610ad45784890151610ad0601f8916826109dd565b8355505b6001600288020188555050505b50505050505056fea26469706673582212202f3e2761204e887bab7c8f092e2346bad94e865f80979db9a6915f9d2bdbc03c64736f6c63430008130033", + ABI: 
"[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"addThenWithdrawRefund\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"balances\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"changeBalance\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"key\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"newValue\",\"type\":\"bytes\"}],\"name\":\"changeStorage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"key\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"value\",\"type\":\"bytes\"}],\"name\":\"createObject\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"isSelfDestructed\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"key\",\"type\":\"bytes32\"}],\"name\":\"resetObject\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"selfDestruct\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"storageData\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"contractAddress\",\"type\":\"address\"}],\"name\":\"touchContract\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"codeHash\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50610d4e806100206000396000f3fe6080604052600436106100915760003560e01c8063a2601e0a11610059578063a2601e0a1461016c578063b0d50e3814610195578063c522de44146101d2578063d58010651461020f578063f529d4481461023857610091565b806327e235e3146100965780633e978173146100d3578063798d40e3146100ef5780637a5ae62e1461012c5780639cb8a26a14610155575b600080fd5b3480156100a257600080fd5b506100bd60048036038101906100b891906105d7565b610261565b6040516100ca919061061d565b60405180910390f35b6100ed60048036038101906100e89190610664565b610279565b005b3480156100fb57600080fd5b50610116600480360381019061011191906105d7565b610319565b60405161012391906106aa565b60405180910390f35b34801561013857600080fd5b50610153600480360381019061014e91906106f1565b610324565b005b34801561016157600080fd5b5061016a610346565b005b34801561017857600080fd5b50610193600480360381019061018e9190610864565b6103b7565b005b3480156101a157600080fd5b506101bc60048036038101906101b791906105d7565b6103dc565b6040516101c991906108db565b60405180910390f35b3480156101de57600080fd5b506101f960048036038101906101f491906106f1565b6103fc565b6040516102069190610975565b60405180910390f35b34801561021b57600080fd5b5061023660048036038101906102319190610864565b61049c565b005b34801561024457600080fd5b5061025f600480360381019061025a9190610997565b6104c1565b005b60006020528060005260406000206000915090505481565b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282546102c89190610a06565b925050819055503373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015610315573d6000803e3d6000fd5b5050565b6000813f9050919050565b6001600082815260200190815260200160002060006103439190610508565b50565b6001600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff0219169083151502179055503373ffffffffffffffffffffffffffffffffffffffff16ff5b806001600084815260200190815260200160002090816103d79190610c46565b505050565b60026020528060005260406000206000915054906101000a900460ff1681565b6001602052806000526040600020600091509050805461041b90610a69565b80601f016020809104026020016040519081016040528092919081815260200182805461044790610a69565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f168201915b505050505081565b806001600084815260200190815260200160002090816104bc9190610c46565b505050565b806000808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055505050565b50805461051490610a69565b6000825580601f106105265750610545565b601f0160209004906000526020600020908101906105449190610548565b5b50565b5b80821115610561576000816000905550600101610549565b5090565b6000604051905090565b600080fd5b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006105a482610579565b9050919050565b6105b481610599565b81146105bf57600080fd5b50565b6000813590506105d1816105ab565b92915050565b6000602082840312156105ed576105ec61056f565b5b60006105fb848285016105c2565b91505092915050565b6000819050919050565b61061781610604565b82525050565b6000602082019050610632600083018461060e565b92915050565b61064181610604565b811461064c57600080fd5b50565b60008135905061065e81610638565b92915050565b60006020828403121561067a5761067961056f565b5b60006106888482850161064f565b91505092915050565b6000819050919050565b6106a481610691565b82525050565b60006020820190506106bf600083018461069b565b92915050565b6106ce81610691565b81
146106d957600080fd5b50565b6000813590506106eb816106c5565b92915050565b6000602082840312156107075761070661056f565b5b6000610715848285016106dc565b91505092915050565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b61077182610728565b810181811067ffffffffffffffff821117156107905761078f610739565b5b80604052505050565b60006107a3610565565b90506107af8282610768565b919050565b600067ffffffffffffffff8211156107cf576107ce610739565b5b6107d882610728565b9050602081019050919050565b82818337600083830152505050565b6000610807610802846107b4565b610799565b90508281526020810184848401111561082357610822610723565b5b61082e8482856107e5565b509392505050565b600082601f83011261084b5761084a61071e565b5b813561085b8482602086016107f4565b91505092915050565b6000806040838503121561087b5761087a61056f565b5b6000610889858286016106dc565b925050602083013567ffffffffffffffff8111156108aa576108a9610574565b5b6108b685828601610836565b9150509250929050565b60008115159050919050565b6108d5816108c0565b82525050565b60006020820190506108f060008301846108cc565b92915050565b600081519050919050565b600082825260208201905092915050565b60005b83811015610930578082015181840152602081019050610915565b60008484015250505050565b6000610947826108f6565b6109518185610901565b9350610961818560208601610912565b61096a81610728565b840191505092915050565b6000602082019050818103600083015261098f818461093c565b905092915050565b600080604083850312156109ae576109ad61056f565b5b60006109bc858286016105c2565b92505060206109cd8582860161064f565b9150509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000610a1182610604565b9150610a1c83610604565b9250828201905080821115610a3457610a336109d7565b5b92915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b60006002820490506001821680610a8157607f821691505b602082108103610a9457610a93610a3a565b5b50919050565b60008190508160005260206000209050919050565b60006020601f8301049050919050565b600082821b905092915050565b600060088302610afc7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82610abf565b610b068683610abf565b95508019841693508086168417925050509392505050565b6000819050919050565b6000610b43610b3e610b3984610604565b610b1e565b610604565b9050919050565b6000819050919050565b610b5d83610b28565b610b71610b6982610b4a565b848454610acc565b825550505050565b600090565b610b86610b79565b610b91818484610b54565b505050565b5b81811015610bb557610baa600082610b7e565b600181019050610b97565b5050565b601f821115610bfa57610bcb81610a9a565b610bd484610aaf565b81016020851015610be3578190505b610bf7610bef85610aaf565b830182610b96565b50505b505050565b600082821c905092915050565b6000610c1d60001984600802610bff565b1980831691505092915050565b6000610c368383610c0c565b9150826002028217905092915050565b610c4f826108f6565b67ffffffffffffffff811115610c6857610c67610739565b5b610c728254610a69565b610c7d828285610bb9565b600060209050601f831160018114610cb05760008415610c9e578287015190505b610ca88582610c2a565b865550610d10565b601f198416610cbe86610a9a565b60005b82811015610ce657848901518255600182019150602085019450602081019050610cc1565b86831015610d035784890151610cff601f891682610c0c565b8355505b6001600288020188555050505b50505050505056fea2646970667358221220bf0fddc0e0582d2115c83591396205edb56de333d7cc4ef10f8a3d740b137fc464736f6c63430008130033", } // StatefuzztestABI is the input ABI used to generate the binding from. 
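The regenerated bindings in the hunk that follows expose two new contract entry points, TouchContract (selector 0x798d40e3) and AddThenWithdrawRefund (selector 0x3e978173). As a rough usage sketch only: the touchAndRefund helper below and the surrounding StatefuzztestSession wiring are assumptions for illustration, not code added by this patch.

package miner

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

// touchAndRefund is a hypothetical test helper: it drives the two new bindings
// through an already-constructed abigen session (backend and keys set up elsewhere).
func touchAndRefund(session *StatefuzztestSession, target common.Address, amount *big.Int) ([32]byte, error) {
	// Read-only call: returns the code hash of the given contract address,
	// touching that account during state-fuzz scenarios.
	codeHash, err := session.TouchContract(target)
	if err != nil {
		return [32]byte{}, err
	}

	// Payable call: per its name, credits `amount` to the sender's tracked
	// balance and then transfers the same amount back, exercising a storage
	// write followed by an ETH refund to the caller.
	if _, err := session.AddThenWithdrawRefund(amount); err != nil {
		return [32]byte{}, err
	}
	return codeHash, nil
}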
@@ -295,6 +295,58 @@ func (_Statefuzztest *StatefuzztestCallerSession) StorageData(arg0 [32]byte) ([]
 	return _Statefuzztest.Contract.StorageData(&_Statefuzztest.CallOpts, arg0)
 }
 
+// TouchContract is a free data retrieval call binding the contract method 0x798d40e3.
+//
+// Solidity: function touchContract(address contractAddress) view returns(bytes32 codeHash)
+func (_Statefuzztest *StatefuzztestCaller) TouchContract(opts *bind.CallOpts, contractAddress common.Address) ([32]byte, error) {
+	var out []interface{}
+	err := _Statefuzztest.contract.Call(opts, &out, "touchContract", contractAddress)
+
+	if err != nil {
+		return *new([32]byte), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)
+
+	return out0, err
+
+}
+
+// TouchContract is a free data retrieval call binding the contract method 0x798d40e3.
+//
+// Solidity: function touchContract(address contractAddress) view returns(bytes32 codeHash)
+func (_Statefuzztest *StatefuzztestSession) TouchContract(contractAddress common.Address) ([32]byte, error) {
+	return _Statefuzztest.Contract.TouchContract(&_Statefuzztest.CallOpts, contractAddress)
+}
+
+// TouchContract is a free data retrieval call binding the contract method 0x798d40e3.
+//
+// Solidity: function touchContract(address contractAddress) view returns(bytes32 codeHash)
+func (_Statefuzztest *StatefuzztestCallerSession) TouchContract(contractAddress common.Address) ([32]byte, error) {
+	return _Statefuzztest.Contract.TouchContract(&_Statefuzztest.CallOpts, contractAddress)
+}
+
+// AddThenWithdrawRefund is a paid mutator transaction binding the contract method 0x3e978173.
+//
+// Solidity: function addThenWithdrawRefund(uint256 amount) payable returns()
+func (_Statefuzztest *StatefuzztestTransactor) AddThenWithdrawRefund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) {
+	return _Statefuzztest.contract.Transact(opts, "addThenWithdrawRefund", amount)
+}
+
+// AddThenWithdrawRefund is a paid mutator transaction binding the contract method 0x3e978173.
+//
+// Solidity: function addThenWithdrawRefund(uint256 amount) payable returns()
+func (_Statefuzztest *StatefuzztestSession) AddThenWithdrawRefund(amount *big.Int) (*types.Transaction, error) {
+	return _Statefuzztest.Contract.AddThenWithdrawRefund(&_Statefuzztest.TransactOpts, amount)
+}
+
+// AddThenWithdrawRefund is a paid mutator transaction binding the contract method 0x3e978173.
+//
+// Solidity: function addThenWithdrawRefund(uint256 amount) payable returns()
+func (_Statefuzztest *StatefuzztestTransactorSession) AddThenWithdrawRefund(amount *big.Int) (*types.Transaction, error) {
+	return _Statefuzztest.Contract.AddThenWithdrawRefund(&_Statefuzztest.TransactOpts, amount)
+}
+
 // ChangeBalance is a paid mutator transaction binding the contract method 0xf529d448.
// // Solidity: function changeBalance(address account, uint256 newBalance) returns() diff --git a/miner/testdata/state_fuzz_test.abi b/miner/testdata/state_fuzz_test.abi index fc0178be7b..92bdef0182 100644 --- a/miner/testdata/state_fuzz_test.abi +++ b/miner/testdata/state_fuzz_test.abi @@ -1 +1 @@ -[{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"balances","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"},{"internalType":"uint256","name":"newBalance","type":"uint256"}],"name":"changeBalance","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"key","type":"bytes32"},{"internalType":"bytes","name":"newValue","type":"bytes"}],"name":"changeStorage","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"key","type":"bytes32"},{"internalType":"bytes","name":"value","type":"bytes"}],"name":"createObject","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"isSelfDestructed","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"key","type":"bytes32"}],"name":"resetObject","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"selfDestruct","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"name":"storageData","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"}] \ No newline at end of file 
+[{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"addThenWithdrawRefund","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"balances","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"},{"internalType":"uint256","name":"newBalance","type":"uint256"}],"name":"changeBalance","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"key","type":"bytes32"},{"internalType":"bytes","name":"newValue","type":"bytes"}],"name":"changeStorage","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"key","type":"bytes32"},{"internalType":"bytes","name":"value","type":"bytes"}],"name":"createObject","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"isSelfDestructed","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"key","type":"bytes32"}],"name":"resetObject","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"selfDestruct","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"name":"storageData","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"contractAddress","type":"address"}],"name":"touchContract","outputs":[{"internalType":"bytes32","name":"codeHash","type":"bytes32"}],"stateMutability":"view","type":"function"}] \ No newline at end of file From 2d3c57aa60e6379f1d21988306ec3ea1c1ccabba Mon Sep 17 00:00:00 2001 From: Wasif Iqbal Date: Wed, 27 Sep 2023 16:54:28 -0500 Subject: [PATCH 46/46] Remove unused code --- miner/algo_common.go | 97 -------------------------------------------- 1 file changed, 97 deletions(-) diff --git a/miner/algo_common.go b/miner/algo_common.go index 70bf3fe65f..8e4cdfeab8 100644 --- a/miner/algo_common.go +++ b/miner/algo_common.go @@ -13,7 +13,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers/logger" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" ) @@ -330,102 +329,6 @@ func insertPayoutTx(env *environment, sender, receiver common.Address, gas uint6 return nil, err } -// BuildMultiTxSnapBlock attempts to build a block with input orders using state.MultiTxSnapshot. If a failure occurs attempting to commit a given order, -// it reverts to previous state and the next order is attempted. -func BuildMultiTxSnapBlock( - inputEnvironment *environment, - key *ecdsa.PrivateKey, - chData chainData, - algoConf algorithmConfig, - orders *types.TransactionsByPriceAndNonce) ([]types.SimulatedBundle, []types.UsedSBundle, error) { - // NOTE(wazzymandias): BuildMultiTxSnapBlock uses envChanges which is different from envDiff struct. - // Eventually the structs should be consolidated but for now they represent the difference between using state - // copies for building blocks (envDiff) versus using MultiTxSnapshot (envChanges). 
-	var (
-		usedBundles      []types.SimulatedBundle
-		usedSbundles     []types.UsedSBundle
-		orderFailed      bool
-		buildBlockErrors []error
-	)
-
-	changes, err := newEnvChanges(inputEnvironment)
-	if err != nil {
-		return nil, nil, err
-	}
-	opMap := map[bool]func() error{
-		true:  changes.env.state.MultiTxSnapshotRevert,
-		false: changes.env.state.MultiTxSnapshotCommit,
-	}
-
-	for {
-		order := orders.Peek()
-		if order == nil {
-			break
-		}
-
-		orderFailed = false
-		// if snapshot cannot be instantiated, return early
-		if err = changes.env.state.NewMultiTxSnapshot(); err != nil {
-			log.Error("Failed to create snapshot", "err", err)
-			return nil, nil, err
-		}
-
-		if tx := order.Tx(); tx != nil {
-			_, skip, err := changes.commitTx(tx, chData)
-			switch skip {
-			case shiftTx:
-				orders.Shift()
-			case popTx:
-				orders.Pop()
-			}
-
-			if err != nil {
-				buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to commit tx: %w", err))
-				orderFailed = true
-			}
-		} else if bundle := order.Bundle(); bundle != nil {
-			err = changes.commitBundle(bundle, chData, algoConf)
-			orders.Pop()
-			if err != nil {
-				log.Trace("Could not apply bundle", "bundle", bundle.OriginalBundle.Hash, "err", err)
-				buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to commit bundle: %w", err))
-				orderFailed = true
-			} else {
-				usedBundles = append(usedBundles, *bundle)
-			}
-		} else if sbundle := order.SBundle(); sbundle != nil {
-			err = changes.CommitSBundle(sbundle, chData, key, algoConf)
-			usedEntry := types.UsedSBundle{
-				Bundle:  sbundle.Bundle,
-				Success: err == nil,
-			}
-			if err != nil {
-				log.Trace("Could not apply sbundle", "bundle", sbundle.Bundle.Hash(), "err", err)
-
-				buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to commit sbundle: %w", err))
-				orderFailed = true
-			}
-			usedSbundles = append(usedSbundles, usedEntry)
-		} else {
-			// note: this should never happen because we should not be inserting invalid transaction types into
-			// the orders heap
-			panic("unsupported order type found")
-		}
-
-		if err = opMap[orderFailed](); err != nil {
-			log.Error("Failed to apply changes with multi-transaction snapshot", "err", err)
-			buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to apply changes: %w", err))
-		}
-	}
-
-	if err = changes.apply(); err != nil {
-		log.Error("Failed to apply changes with multi-transaction snapshot", "err", err)
-		buildBlockErrors = append(buildBlockErrors, fmt.Errorf("failed to apply changes: %w", err))
-	}
-
-	return usedBundles, usedSbundles, errors.Join(buildBlockErrors...)
-}
-
 // CheckRetryOrderAndReinsert checks if the order has been retried up to the retryLimit and if not, reinserts the order into the orders heap.
 func CheckRetryOrderAndReinsert(
 	order *types.TxWithMinerFee, orders *types.TransactionsByPriceAndNonce,