diff --git a/cmd/rpcdaemon/commands/zkevm_counters.go b/cmd/rpcdaemon/commands/zkevm_counters.go
index 74d57773019..627521e3d84 100644
--- a/cmd/rpcdaemon/commands/zkevm_counters.go
+++ b/cmd/rpcdaemon/commands/zkevm_counters.go
@@ -160,7 +160,7 @@ func (zkapi *ZkEvmAPIImpl) EstimateCounters(ctx context.Context, rpcTx *zkevmRPC
 
 	smtDepth := smt.GetDepth()
 
-	txCounters := vm.NewTransactionCounter(tx, int(smtDepth), uint16(forkId), zkapi.config.Zk.VirtualCountersSmtReduction, false)
+	txCounters := vm.NewTransactionCounter(tx, int(smtDepth), uint16(forkId), zkapi.config.Zk.VirtualCountersSmtReduction, false, false)
 	batchCounters := vm.NewBatchCounterCollector(int(smtDepth), uint16(forkId), zkapi.config.Zk.VirtualCountersSmtReduction, false, nil)
 
 	_, err = batchCounters.AddNewTransactionCounters(txCounters)
@@ -369,7 +369,7 @@ func (api *ZkEvmAPIImpl) TraceTransactionCounters(ctx context.Context, hash comm
 		return err
 	}
 
-	txCounters := vm.NewTransactionCounter(txn, int(smtDepth), uint16(forkId), api.config.Zk.VirtualCountersSmtReduction, false)
+	txCounters := vm.NewTransactionCounter(txn, int(smtDepth), uint16(forkId), api.config.Zk.VirtualCountersSmtReduction, false, false)
 	batchCounters := vm.NewBatchCounterCollector(int(smtDepth), uint16(forkId), api.config.Zk.VirtualCountersSmtReduction, false, nil)
 
 	if _, err = batchCounters.AddNewTransactionCounters(txCounters); err != nil {
@@ -551,7 +551,7 @@ func (api *ZkEvmAPIImpl) execTransaction(
 		msg        core.Message
 		execResult *core.ExecutionResult
 	)
-	txCounters := vm.NewTransactionCounter(tx, smtDepth, forkId, api.config.Zk.VirtualCountersSmtReduction, false)
+	txCounters := vm.NewTransactionCounter(tx, smtDepth, forkId, api.config.Zk.VirtualCountersSmtReduction, false, false)
 
 	if _, err = batchCounters.AddNewTransactionCounters(txCounters); err != nil {
 		return 0, err
diff --git a/cmd/txpool/main.go b/cmd/txpool/main.go
index 27894b1d3ad..4e28109e2c0 100644
--- a/cmd/txpool/main.go
+++ b/cmd/txpool/main.go
@@ -57,15 +57,18 @@ var (
 	commitEvery time.Duration
 
 	// For X Layer
-	enableWhiteList      bool
-	whiteList            []string
-	blockList            []string
-	freeClaimGasAddrs    []string
-	gasPriceMultiple     uint64
-	enableFreeGasByNonce bool
-	freeGasExAddrs       []string
-	freeGasCountPerAddr  uint64
-	freeGasLimit         uint64
+	enableWhiteList             bool
+	whiteList                   []string
+	blockList                   []string
+	freeClaimGasAddrs           []string
+	gasPriceMultiple            uint64
+	enableFreeGasByNonce        bool
+	freeGasExAddrs              []string
+	freeGasCountPerAddr         uint64
+	freeGasLimit                uint64
+	okPayAccountList            []string
+	okPayGasLimitPerBlock       uint64
+	okPayCounterLimitPercentage uint
 )
 
 func init() {
@@ -99,6 +102,9 @@ func init() {
 	rootCmd.Flags().StringSliceVar(&freeGasExAddrs, utils.TxPoolFreeGasExAddrs.Name, ethconfig.DeprecatedDefaultTxPoolConfig.FreeGasExAddrs, utils.TxPoolFreeGasExAddrs.Usage)
 	rootCmd.PersistentFlags().Uint64Var(&freeGasCountPerAddr, utils.TxPoolFreeGasCountPerAddr.Name, ethconfig.DeprecatedDefaultTxPoolConfig.FreeGasCountPerAddr, utils.TxPoolFreeGasCountPerAddr.Usage)
 	rootCmd.PersistentFlags().Uint64Var(&freeGasLimit, utils.TxPoolFreeGasLimit.Name, ethconfig.DeprecatedDefaultTxPoolConfig.FreeGasLimit, utils.TxPoolFreeGasLimit.Usage)
+	rootCmd.Flags().Uint64Var(&okPayGasLimitPerBlock, utils.TxPoolOkPayGasLimitPerBlock.Name, 0, utils.TxPoolOkPayGasLimitPerBlock.Usage)
+	rootCmd.Flags().StringSliceVar(&okPayAccountList, utils.TxPoolOkPayAccountList.Name, []string{}, utils.TxPoolOkPayAccountList.Usage)
+	rootCmd.Flags().UintVar(&okPayCounterLimitPercentage, utils.TxPoolOkPayCounterLimitPercentage.Name, 50, utils.TxPoolOkPayCounterLimitPercentage.Usage)
 }
 
 var rootCmd = &cobra.Command{
@@ -204,6 +210,14 @@ func doTxpool(ctx context.Context) error {
 	ethCfg.DeprecatedTxPool.FreeGasCountPerAddr = freeGasCountPerAddr
 	ethCfg.DeprecatedTxPool.FreeGasLimit = freeGasLimit
 
+	ethCfg.DeprecatedTxPool.OkPayAccountList = make([]string, len(okPayAccountList))
+	for i, addrHex := range okPayAccountList {
+		addr := common.HexToAddress(addrHex)
+		ethCfg.DeprecatedTxPool.OkPayAccountList[i] = addr.String()
+	}
+	ethCfg.DeprecatedTxPool.OkPayGasLimitPerBlock = okPayGasLimitPerBlock
+	ethCfg.DeprecatedTxPool.OkPayCounterLimitPercentage = okPayCounterLimitPercentage
+
 	newTxs := make(chan types.Announcements, 1024)
 	defer close(newTxs)
 	txPoolDB, txPool, fetch, send, txpoolGrpcServer, err := txpooluitl.AllComponents(ctx, cfg, ethCfg,
diff --git a/cmd/utils/flags_xlayer.go b/cmd/utils/flags_xlayer.go
index 5424b21bdcf..1f32e81479a 100644
--- a/cmd/utils/flags_xlayer.go
+++ b/cmd/utils/flags_xlayer.go
@@ -94,6 +94,21 @@
 		Name:  "txpool.freegaslimit",
 		Usage: "FreeGasLimit is the max gas allowed use to do a free gas tx",
 	}
+	TxPoolOkPayAccountList = cli.StringFlag{
+		Name:  "txpool.okpay-account-list",
+		Usage: "Comma-separated list of addresses that are allowed to send OK Pay transactions",
+		Value: "",
+	}
+	TxPoolOkPayGasLimitPerBlock = cli.Uint64Flag{
+		Name:  "txpool.okpay-gaslimit-per-block",
+		Usage: "Maximum gas per block that OK Pay transactions may use",
+		Value: 0,
+	}
+	TxPoolOkPayCounterLimitPercentage = cli.UintFlag{
+		Name:  "txpool.okpay-counter-limit-percentage",
+		Usage: "Percentage of the batch counter limit available to OK Pay transactions",
+		Value: 50,
+	}
 	// Gas Pricer
 	GpoTypeFlag = cli.StringFlag{
 		Name: "gpo.type",
@@ -308,6 +323,21 @@ func setTxPoolXLayer(ctx *cli.Context, cfg *ethconfig.DeprecatedTxPoolConfig) {
 	if ctx.IsSet(TxPoolFreeGasLimit.Name) {
 		cfg.FreeGasLimit = ctx.Uint64(TxPoolFreeGasLimit.Name)
 	}
+	if ctx.IsSet(TxPoolOkPayAccountList.Name) {
+		// Parse the comma-separated flag
+		addrHexes := SplitAndTrim(ctx.String(TxPoolOkPayAccountList.Name))
+		cfg.OkPayAccountList = make([]string, len(addrHexes))
+		for i, senderHex := range addrHexes {
+			sender := libcommon.HexToAddress(senderHex)
+			cfg.OkPayAccountList[i] = sender.String()
+		}
+	}
+	if ctx.IsSet(TxPoolOkPayGasLimitPerBlock.Name) {
+		cfg.OkPayGasLimitPerBlock = ctx.Uint64(TxPoolOkPayGasLimitPerBlock.Name)
+	}
+	if ctx.IsSet(TxPoolOkPayCounterLimitPercentage.Name) {
+		cfg.OkPayCounterLimitPercentage = ctx.Uint(TxPoolOkPayCounterLimitPercentage.Name)
+	}
 }
 
 // SetApolloGPOXLayer is a public wrapper function to internally call setGPO
diff --git a/core/vm/zk_batch_counters.go b/core/vm/zk_batch_counters.go
index 16dc10bb74f..9491fb9e3e0 100644
--- a/core/vm/zk_batch_counters.go
+++ b/core/vm/zk_batch_counters.go
@@ -26,6 +26,11 @@ type BatchCounterCollector struct {
 	rlpCombinedCountersCache        Counters
 	executionCombinedCountersCache  Counters
 	processingCombinedCountersCache Counters
+
+	// For X Layer
+	okPayRlpCombinedCounters        Counters
+	okPayExecutionCombinedCounters  Counters
+	okPayProcessingCombinedCounters Counters
 }
 
 func NewBatchCounterCollector(smtMaxLevel int, forkId uint16, mcpReduction float64, unlimitedCounters bool, addonCounters *Counters) *BatchCounterCollector {
@@ -45,6 +50,10 @@
 	bcc.executionCombinedCounters = bcc.NewCounters()
 	bcc.processingCombinedCounters = bcc.NewCounters()
 
+	bcc.okPayRlpCombinedCounters = bcc.NewCounters()
+	bcc.okPayExecutionCombinedCounters = bcc.NewCounters()
+	bcc.okPayProcessingCombinedCounters = bcc.NewCounters()
+
 	return &bcc
 }
@@ -73,6 +82,10 @@ func (bcc *BatchCounterCollector) Clone() *BatchCounterCollector {
 		rlpCombinedCounters:        bcc.rlpCombinedCounters.Clone(),
 		executionCombinedCounters:  bcc.executionCombinedCounters.Clone(),
 		processingCombinedCounters: bcc.processingCombinedCounters.Clone(),
+
+		okPayRlpCombinedCounters:        bcc.okPayRlpCombinedCounters.Clone(),
+		okPayExecutionCombinedCounters:  bcc.okPayExecutionCombinedCounters.Clone(),
+		okPayProcessingCombinedCounters: bcc.okPayProcessingCombinedCounters.Clone(),
 	}
 }
 
@@ -274,15 +287,24 @@ func (bcc *BatchCounterCollector) CombineCollectorsNoChanges() Counters {
 
 func (bcc *BatchCounterCollector) UpdateRlpCountersCache(txCounters *TransactionCounter) {
 	for k, v := range txCounters.rlpCounters.counters {
 		bcc.rlpCombinedCounters[k].used += v.used
+		if txCounters.isOkPayTx {
+			bcc.okPayRlpCombinedCounters[k].used += v.used
+		}
 	}
 }
 
 func (bcc *BatchCounterCollector) UpdateExecutionAndProcessingCountersCache(txCounters *TransactionCounter) {
 	for k, v := range txCounters.executionCounters.counters {
 		bcc.executionCombinedCounters[k].used += v.used
+		if txCounters.isOkPayTx {
+			bcc.okPayExecutionCombinedCounters[k].used += v.used
+		}
 	}
 	for k, v := range txCounters.processingCounters.counters {
 		bcc.processingCombinedCounters[k].used += v.used
+		if txCounters.isOkPayTx {
+			bcc.okPayProcessingCombinedCounters[k].used += v.used
+		}
 	}
 }
diff --git a/core/vm/zk_batch_counters_xlayer.go b/core/vm/zk_batch_counters_xlayer.go
new file mode 100644
index 00000000000..52b8ab8acd7
--- /dev/null
+++ b/core/vm/zk_batch_counters_xlayer.go
@@ -0,0 +1,35 @@
+package vm
+
+import (
+	"fmt"
+	"github.com/ledgerwatch/log/v3"
+)
+
+// CheckOkPayForOverflow returns true when the combined batch counters have used more than okPayCounterLimitPercentage percent of any counter's initial amount
+func (bcc *BatchCounterCollector) CheckOkPayForOverflow(okPayCounterLimitPercentage uint) (bool, error) {
+	combined := bcc.NewCounters()
+	for k := range combined {
+		val := bcc.rlpCombinedCounters[k].used + bcc.executionCombinedCounters[k].used + bcc.processingCombinedCounters[k].used
+		combined[k].used += val
+		combined[k].remaining -= val
+	}
+
+	overflow := false
+	for _, v := range combined {
+		if v.initialAmount*int(okPayCounterLimitPercentage)/100 < v.used {
+			log.Info("[VCOUNTER] OkPay Counter overflow detected", "counter", v.name, "remaining", v.remaining, "used", v.used)
+			overflow = true
+		}
+	}
+
+	// if we have an overflow we want to log the counters for debugging purposes
+	if overflow {
+		logText := "[VCOUNTER] Counters stats"
+		for _, v := range combined {
+			logText += fmt.Sprintf(" %s: initial: %v used: %v (remaining: %v)", v.name, v.initialAmount, v.used, v.remaining)
+		}
+		log.Info(logText)
+	}
+
+	return overflow, nil
+}
diff --git a/core/vm/zk_transaction_counters.go b/core/vm/zk_transaction_counters.go
index c27388b9b83..d357579af0a 100644
--- a/core/vm/zk_transaction_counters.go
+++ b/core/vm/zk_transaction_counters.go
@@ -18,9 +18,11 @@ type TransactionCounter struct {
 	smtLevels   int
 	forkId      uint16
 	l2DataCache []byte
+
+	isOkPayTx bool // For X Layer
 }
 
-func NewTransactionCounter(transaction types.Transaction, smtMaxLevel int, forkId uint16, mcpReduction float64, shouldCountersBeUnlimited bool) *TransactionCounter {
+func NewTransactionCounter(transaction types.Transaction, smtMaxLevel int, forkId uint16, mcpReduction float64, shouldCountersBeUnlimited bool, isOkPayTx bool) *TransactionCounter {
 	totalLevel := calculateSmtLevels(smtMaxLevel, 32, mcpReduction)
 	var tc *TransactionCounter
@@ -33,6 +35,7 @@ func NewTransactionCounter(transaction types.Transaction, smtMaxLevel int, forkI
 			processingCounters: NewUnlimitedCounterCollector(),
 			smtLevels:           1, // max depth of the tree anyways
 			forkId:              forkId,
+			isOkPayTx:           isOkPayTx,
 		}
 	} else {
 		tc = &TransactionCounter{
@@ -42,6 +45,7 @@
 			processingCounters: NewCounterCollector(totalLevel, forkId),
 			smtLevels:           totalLevel,
 			forkId:              forkId,
+			isOkPayTx:           isOkPayTx,
 		}
 	}
 	tc.executionCounters.SetTransaction(transaction)
diff --git a/eth/ethconfig/tx_pool.go b/eth/ethconfig/tx_pool.go
index 4f2f505043a..68450d637c6 100644
--- a/eth/ethconfig/tx_pool.go
+++ b/eth/ethconfig/tx_pool.go
@@ -63,6 +63,12 @@ type DeprecatedTxPoolConfig struct {
 	FreeGasCountPerAddr uint64
 	// FreeGasLimit is the max gas allowed use to do a free gas tx
 	FreeGasLimit uint64
+	// OkPayAccountList is the list of OK Pay bundler account addresses
+	OkPayAccountList []string
+	// OkPayGasLimitPerBlock is the maximum gas per block that OK Pay transactions may use
+	OkPayGasLimitPerBlock uint64
+	// OkPayCounterLimitPercentage is the percentage of the batch counter limit available to OK Pay transactions
+	OkPayCounterLimitPercentage uint
 }
 
 // DeprecatedDefaultTxPoolConfig contains the default configurations for the transaction
diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go
index d2a74cafde5..311fd7aac8c 100644
--- a/eth/stagedsync/stage_mining_exec.go
+++ b/eth/stagedsync/stage_mining_exec.go
@@ -211,7 +211,7 @@ func getNextTransactions(
 	counter := 0
 	for !onTime && counter < 1000 {
 		remainingGas := header.GasLimit - header.GasUsed
-		if onTime, count, err = cfg.txPool2.YieldBest(amount, &txSlots, poolTx, executionAt, remainingGas, alreadyYielded); err != nil {
+		if onTime, count, err = cfg.txPool2.YieldBest(amount, &txSlots, poolTx, executionAt, remainingGas, alreadyYielded, false); err != nil {
 			return err
 		}
 		time.Sleep(1 * time.Millisecond)
diff --git a/test/config/test.erigon.seq.config.yaml b/test/config/test.erigon.seq.config.yaml
index 20acb38cb01..4ad3e89ab8e 100644
--- a/test/config/test.erigon.seq.config.yaml
+++ b/test/config/test.erigon.seq.config.yaml
@@ -71,6 +71,9 @@ txpool.gaspricemultiple : 2
 txpool.blockedlist: ["0xdD2FD4581271e230360230F9337D5c0430Bf44C0"]
 txpool.enablefreegasbynonce : true
 txpool.freegascountperaddr : 100
+txpool.okpay-gaslimit-per-block: 100000
+txpool.okpay-counter-limit-percentage: 80
+txpool.okpay-account-list: ["0x36710Da8612C168702bf4F10f5ff089147a1Ba78"]
 gpo.type: "follower"
 gpo.update-period: "2s"
diff --git a/test/e2e/smoke_test.go b/test/e2e/smoke_test.go
index 8e810578bfa..aa46bf5c7d0 100644
--- a/test/e2e/smoke_test.go
+++ b/test/e2e/smoke_test.go
@@ -493,3 +493,61 @@ func sendBridgeAsset(
 	const txTimeout = 60 * time.Second
 	return operations.WaitTxToBeMined(ctx, c, tx, txTimeout)
 }
+
+var (
+	okPayAddr   = "0x36710Da8612C168702bf4F10f5ff089147a1Ba78"
+	okPayPriKey = "5f329974c35134e12a3b68c3ca1c3f52d96c70f11fae0b0074188067a8906064"
+)
+
+func TestOkPayTx(t *testing.T) {
+	ctx := context.Background()
+	client, err := ethclient.Dial(operations.DefaultL2NetworkURL)
+	require.NoError(t, err)
+
+	// prepare balance for okPayAddr
+	transToken(t, ctx, client, uint256.NewInt(21000000*encoding.Gwei), okPayAddr)
+
+	// build and send ok pay tx
+	okPayTx := buildAndSendTransTokenTx(t, ctx, client, okPayPriKey, operations.DefaultL2AdminAddress, uint256.NewInt(0))
+	err = operations.WaitTxToBeMined(ctx, client, okPayTx, operations.DefaultTimeoutTxToBeMined)
+	require.NoError(t, err)
+	//require.Error(t, err, "context deadline exceeded") // txpool.okpay-counter-limit-percentage=0
+}
+
+func buildAndSendTransTokenTx(t *testing.T, ctx context.Context, client *ethclient.Client, privateKeyStr string, toAddress string, amount *uint256.Int) types.Transaction {
+	auth, err := operations.GetAuth(privateKeyStr, operations.DefaultL2ChainID)
+	nonce, err := client.PendingNonceAt(ctx, auth.From)
+	//gasPrice, err := client.SuggestGasPrice(ctx)
+	gasPrice, err := operations.GetMinGasPrice()
+	require.NoError(t, err)
+
+	to := common.HexToAddress(toAddress)
+	gas, err := client.EstimateGas(ctx, ethereum.CallMsg{
+		From:  auth.From,
+		To:    &to,
+		Value: amount,
+	})
+	require.NoError(t, err)
+
+	var tx types.Transaction = &types.LegacyTx{
+		CommonTx: types.CommonTx{
+			Nonce: nonce,
+			To:    &to,
+			Gas:   gas,
+			Value: amount,
+		},
+		GasPrice: uint256.NewInt(gasPrice),
+	}
+
+	privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(privateKeyStr, "0x"))
+	require.NoError(t, err)
+
+	signer := types.MakeSigner(operations.GetTestChainConfig(operations.DefaultL2ChainID), 1)
+	signedTx, err := types.SignTx(tx, *signer, privateKey)
+	require.NoError(t, err)
+
+	err = client.SendTransaction(ctx, signedTx)
+	require.NoError(t, err)
+
+	return signedTx
+}
diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go
index 2cdb5e253d8..aba87b62029 100644
--- a/turbo/cli/default_flags.go
+++ b/turbo/cli/default_flags.go
@@ -272,4 +272,7 @@ var DefaultFlags = []cli.Flag{
 	&utils.TxPoolFreeGasLimit,
 	&utils.HTTPApiKeysFlag,
 	&utils.MethodRateLimitFlag,
+	&utils.TxPoolOkPayAccountList,
+	&utils.TxPoolOkPayGasLimitPerBlock,
+	&utils.TxPoolOkPayCounterLimitPercentage,
 }
diff --git a/turbo/transactions/call.go b/turbo/transactions/call.go
index 21ddf13fd4c..fa43183f18b 100644
--- a/turbo/transactions/call.go
+++ b/turbo/transactions/call.go
@@ -256,12 +256,12 @@ func NewReusableCaller(
 		msg.GasPrice(),
 		msg.Data(),
 	)
-	
+
 	var batchCounters *vm.BatchCounterCollector
 	var counterCollector *vm.CounterCollector
 	if useCounters {
 		batchCounters = vm.NewBatchCounterCollector(smtDepth, uint16(forkId), VirtualCountersSmtReduction, false, nil)
-		txCounters := vm.NewTransactionCounter(transaction, smtDepth, uint16(forkId), VirtualCountersSmtReduction, false)
+		txCounters := vm.NewTransactionCounter(transaction, smtDepth, uint16(forkId), VirtualCountersSmtReduction, false, false)
 		_, err = batchCounters.AddNewTransactionCounters(txCounters)
 
 		if err != nil {
diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go
index 9b3e0d79319..c7641e4640c 100644
--- a/zk/stages/stage_sequence_execute.go
+++ b/zk/stages/stage_sequence_execute.go
@@ -198,6 +198,8 @@ func SpawnSequencingStage(
 		log.Info(fmt.Sprintf("[%s] Waiting for txs from the pool...", logPrefix))
 	}
 
+	okPayPriority := true // For X Layer
+
 LOOP_TRANSACTIONS:
 	for {
 		select {
@@ -222,7 +224,7 @@
 				}
 			} else if !batchState.isL1Recovery() {
 				var allConditionsOK bool
-				batchState.blockState.transactionsForInclusion, allConditionsOK, err = getNextPoolTransactions(ctx, cfg, executionAt, batchState.forkId, batchState.yieldedTransactions)
+				batchState.blockState.transactionsForInclusion, allConditionsOK, err = getNextPoolTransactions(ctx, cfg, executionAt, batchState.forkId, batchState.yieldedTransactions, okPayPriority)
 				if err != nil {
 					return err
 				}
@@ -244,7 +246,8 @@
 				// The copying of this structure is intentional
 				backupDataSizeChecker := *blockDataSizeChecker
-				receipt, execResult, anyOverflow, err := attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, batchState.isL1Recovery(), batchState.forkId, l1TreeUpdateIndex, &backupDataSizeChecker)
+
+				receipt, execResult, anyOverflow, okPayOverflow, err := attemptAddTransaction(cfg, sdb, ibs, batchCounters, &blockContext, header, transaction, effectiveGas, batchState.isL1Recovery(), batchState.forkId, l1TreeUpdateIndex, &backupDataSizeChecker, okPayPriority, cfg.txPool.OkPayCounterLimitPercentage())
 				if err != nil {
 					if batchState.isLimboRecovery() {
 						panic("limbo transaction has already been executed once so they must not fail while re-executing")
 					}
@@ -293,6 +296,11 @@
 					}
 				}
 
+				if okPayOverflow {
+					okPayPriority = false
+					continue
+				}
+
 				if err == nil {
 					blockDataSizeChecker = &backupDataSizeChecker
 					batchState.onAddedTransaction(transaction, receipt, execResult, effectiveGas)
@@ -397,7 +405,7 @@
 
 	// For X Layer
 	tryToSleepSequencer(cfg.zk.XLayer.SequencerBatchSleepDuration, logPrefix)
-	
+
 	// TODO: It is 99% sure that there is no need to write this in any of processInjectedInitialBatch, alignExecutionToDatastream, doCheckForBadBatch but it is worth double checknig
 	// the unwind of this value is handed by UnwindExecutionStageDbWrites
 	if _, err := rawdb.IncrementStateVersionByBlockNumberIfNeeded(batchContext.sdb.tx, block.NumberU64()); err != nil {
diff --git a/zk/stages/stage_sequence_execute_injected_batch.go b/zk/stages/stage_sequence_execute_injected_batch.go
index e1917b7748a..28e83ea096f 100644
--- a/zk/stages/stage_sequence_execute_injected_batch.go
+++ b/zk/stages/stage_sequence_execute_injected_batch.go
@@ -107,7 +107,7 @@ func handleInjectedBatch(
 	// process the tx and we can ignore the counters as an overflow at this stage means no network anyway
 	effectiveGas := DeriveEffectiveGasPrice(*batchContext.cfg, decodedBlocks[0].Transactions[0])
 
-	receipt, execResult, _, err := attemptAddTransaction(*batchContext.cfg, batchContext.sdb, ibs, batchCounters, blockContext, header, decodedBlocks[0].Transactions[0], effectiveGas, false, forkId, 0 /* use 0 for l1InfoIndex in injected batch */, nil)
+	receipt, execResult, _, _, err := attemptAddTransaction(*batchContext.cfg, batchContext.sdb, ibs, batchCounters, blockContext, header, decodedBlocks[0].Transactions[0], effectiveGas, false, forkId, 0 /* use 0 for l1InfoIndex in injected batch */, nil, false, 0)
 	if err != nil {
 		return nil, nil, nil, 0, err
 	}
diff --git a/zk/stages/stage_sequence_execute_transactions.go b/zk/stages/stage_sequence_execute_transactions.go
index 97851c145d3..88fb9113cc9 100644
--- a/zk/stages/stage_sequence_execute_transactions.go
+++ b/zk/stages/stage_sequence_execute_transactions.go
@@ -21,7 +21,7 @@ import (
 	"github.com/ledgerwatch/log/v3"
 )
 
-func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executionAt, forkId uint64, alreadyYielded mapset.Set[[32]byte]) ([]types.Transaction, bool, error) {
+func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executionAt, forkId uint64, alreadyYielded mapset.Set[[32]byte], okPayPriority bool) ([]types.Transaction, bool, error) {
 	cfg.txPool.LockFlusher()
 	defer cfg.txPool.UnlockFlusher()
 
@@ -33,7 +33,8 @@ func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executio
 	if err := cfg.txPoolDb.View(ctx, func(poolTx kv.Tx) error {
 		slots := types2.TxsRlp{}
-		if allConditionsOk, _, err = cfg.txPool.YieldBest(cfg.yieldSize, &slots, poolTx, executionAt, gasLimit, alreadyYielded); err != nil {
+
+		if allConditionsOk, _, err = cfg.txPool.YieldBest(cfg.yieldSize, &slots, poolTx, executionAt, gasLimit, alreadyYielded, okPayPriority); err != nil {
 			return err
 		}
 		yieldedTxs, err := extractTransactionsFromSlot(&slots)
@@ -110,18 +111,31 @@ func attemptAddTransaction(
 	l1Recovery bool,
 	forkId, l1InfoIndex uint64,
 	blockDataSizeChecker *BlockDataChecker,
-) (*types.Receipt, *core.ExecutionResult, bool, error) {
-	var batchDataOverflow, overflow bool
+	okPayPriority bool,
+	okPayCounterLimitPercentage uint,
+) (*types.Receipt, *core.ExecutionResult, bool, bool, error) {
+	var batchDataOverflow, overflow, okPayOverflow bool
 	var err error
 
-	txCounters := vm.NewTransactionCounter(transaction, sdb.smt.GetDepth(), uint16(forkId), cfg.zk.VirtualCountersSmtReduction, cfg.zk.ShouldCountersBeUnlimited(l1Recovery))
+	// For X Layer: resolve the sender so we can tell whether this is an OK Pay tx
+	sender, ok := transaction.GetSender()
+	if !ok {
+		signer := types.MakeSigner(cfg.chainConfig, header.Number.Uint64())
+		sender, err = transaction.Sender(*signer)
+		if err != nil {
+			return nil, nil, false, false, err
+		}
+	}
+	isOkPayTx := cfg.txPool.IsOkPayAddr(sender)
+
+	txCounters := vm.NewTransactionCounter(transaction, sdb.smt.GetDepth(), uint16(forkId), cfg.zk.VirtualCountersSmtReduction, cfg.zk.ShouldCountersBeUnlimited(l1Recovery), isOkPayTx)
 	overflow, err = batchCounters.AddNewTransactionCounters(txCounters)
 
 	// run this only once the first time, do not add it on rerun
 	if blockDataSizeChecker != nil {
 		txL2Data, err := txCounters.GetL2DataCache()
 		if err != nil {
-			return nil, nil, false, err
+			return nil, nil, false, false, err
 		}
 		batchDataOverflow = blockDataSizeChecker.AddTransactionData(txL2Data)
 		if batchDataOverflow {
@@ -129,11 +143,11 @@ func attemptAddTransaction(
 		}
 	}
 	if err != nil {
-		return nil, nil, false, err
+		return nil, nil, false, false, err
 	}
 	anyOverflow := overflow || batchDataOverflow
 	if anyOverflow && !l1Recovery {
-		return nil, nil, true, nil
+		return nil, nil, true, false, nil
 	}
 
 	gasPool := new(core.GasPool).AddGas(transactionGasLimit)
@@ -164,22 +178,34 @@ func attemptAddTransaction(
 	)
 
 	if err != nil {
-		return nil, nil, false, err
+		return nil, nil, false, false, err
 	}
 
 	if err = txCounters.ProcessTx(ibs, execResult.ReturnData); err != nil {
-		return nil, nil, false, err
+		return nil, nil, false, false, err
 	}
 
 	batchCounters.UpdateExecutionAndProcessingCountersCache(txCounters)
 
 	// now that we have executed we can check again for an overflow
 	if overflow, err = batchCounters.CheckForOverflow(l1InfoIndex != 0); err != nil {
-		return nil, nil, false, err
+		return nil, nil, false, false, err
 	}
 
 	if overflow {
 		ibs.RevertToSnapshot(snapshot)
-		return nil, nil, true, nil
+		return nil, nil, true, false, nil
+	}
+
+	// For X Layer: check ok pay tx counter overflow
+	if isOkPayTx && okPayPriority {
+		// now that we have executed we can check again for an overflow
+		if okPayOverflow, err = batchCounters.CheckOkPayForOverflow(okPayCounterLimitPercentage); err != nil {
+			return nil, nil, false, false, err
+		}
+		if okPayOverflow {
+			ibs.RevertToSnapshot(snapshot)
+			return nil, nil, false, true, nil
+		}
 	}
 	// add the gas only if not reverted. This should not be moved above the overflow check
@@ -188,10 +214,10 @@
 	// we need to keep hold of the effective percentage used
 	// todo [zkevm] for now we're hard coding to the max value but we need to calc this properly
 	if err = sdb.hermezDb.WriteEffectiveGasPricePercentage(transaction.Hash(), effectiveGasPrice); err != nil {
-		return nil, nil, false, err
+		return nil, nil, false, false, err
 	}
 
 	ibs.FinalizeTx(evm.ChainRules(), noop)
 
-	return receipt, execResult, false, nil
+	return receipt, execResult, false, false, nil
 }
diff --git a/zk/tests/zk_counters_test.go b/zk/tests/zk_counters_test.go
index 4a131b5c39e..a674e9d0214 100644
--- a/zk/tests/zk_counters_test.go
+++ b/zk/tests/zk_counters_test.go
@@ -300,7 +300,7 @@ func runTest(t *testing.T, test vector, err error, fileName string, idx int) {
 			}
 			blockStarted = true
 		}
-		txCounters := vm.NewTransactionCounter(transaction, test.SmtDepths[i], uint16(test.ForkId), 0.6, false)
+		txCounters := vm.NewTransactionCounter(transaction, test.SmtDepths[i], uint16(test.ForkId), 0.6, false, false)
 		overflow, err := batchCollector.AddNewTransactionCounters(txCounters)
 		if err != nil {
 			t.Fatal(err)
diff --git a/zk/txpool/pool.go b/zk/txpool/pool.go
index f7e03bd87dc..1c836bfc2ac 100644
--- a/zk/txpool/pool.go
+++ b/zk/txpool/pool.go
@@ -398,15 +398,18 @@ func New(newTxs chan types.Announcements, coreDB kv.RoDB, cfg txpoolcfg.Config,
 		limbo: newLimbo(),
 		// X Layer config
 		xlayerCfg: XLayerConfig{
-			EnableWhitelist:      ethCfg.DeprecatedTxPool.EnableWhitelist,
-			WhiteList:            ethCfg.DeprecatedTxPool.WhiteList,
-			BlockedList:          ethCfg.DeprecatedTxPool.BlockedList,
-			FreeClaimGasAddrs:    ethCfg.DeprecatedTxPool.FreeClaimGasAddrs,
-			GasPriceMultiple:     ethCfg.DeprecatedTxPool.GasPriceMultiple,
-			EnableFreeGasByNonce: ethCfg.DeprecatedTxPool.EnableFreeGasByNonce,
-			FreeGasExAddrs:       ethCfg.DeprecatedTxPool.FreeGasExAddrs,
-			FreeGasCountPerAddr:  ethCfg.DeprecatedTxPool.FreeGasCountPerAddr,
-			FreeGasLimit:         ethCfg.DeprecatedTxPool.FreeGasLimit,
+			EnableWhitelist:             ethCfg.DeprecatedTxPool.EnableWhitelist,
+			WhiteList:                   ethCfg.DeprecatedTxPool.WhiteList,
+			BlockedList:                 ethCfg.DeprecatedTxPool.BlockedList,
+			FreeClaimGasAddrs:           ethCfg.DeprecatedTxPool.FreeClaimGasAddrs,
+			GasPriceMultiple:            ethCfg.DeprecatedTxPool.GasPriceMultiple,
+			EnableFreeGasByNonce:        ethCfg.DeprecatedTxPool.EnableFreeGasByNonce,
+			FreeGasExAddrs:              ethCfg.DeprecatedTxPool.FreeGasExAddrs,
+			FreeGasCountPerAddr:         ethCfg.DeprecatedTxPool.FreeGasCountPerAddr,
+			FreeGasLimit:                ethCfg.DeprecatedTxPool.FreeGasLimit,
+			OkPayAccountList:            ethCfg.DeprecatedTxPool.OkPayAccountList,
+			OkPayGasLimitPerBlock:       ethCfg.DeprecatedTxPool.OkPayGasLimitPerBlock,
+			OkPayCounterLimitPercentage: ethCfg.DeprecatedTxPool.OkPayCounterLimitPercentage,
 		},
 		freeGasAddrs: map[string]bool{},
 	}, nil
@@ -700,13 +703,13 @@ func (p *TxPool) ResetYieldedStatus() {
 	}
 }
 
-func (p *TxPool) YieldBest(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas uint64, toSkip mapset.Set[[32]byte]) (bool, int, error) {
-	return p.best(n, txs, tx, onTopOf, availableGas, toSkip)
+func (p *TxPool) YieldBest(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas uint64, toSkip mapset.Set[[32]byte], okPayPriority bool) (bool, int, error) {
+	return p.best(n, txs, tx, onTopOf, availableGas, toSkip, okPayPriority)
 }
 
 func (p *TxPool) PeekBest(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas uint64) (bool, error) {
 	set := mapset.NewThreadUnsafeSet[[32]byte]()
-	onTime, _, err := p.best(n, txs, tx, onTopOf, availableGas, set)
+	onTime, _, err := p.best(n, txs, tx, onTopOf, availableGas, set, false)
 	return onTime, err
 }
diff --git a/zk/txpool/pool_xlayer.go b/zk/txpool/pool_xlayer.go
index 339c3bca315..bd47e108b1e 100644
--- a/zk/txpool/pool_xlayer.go
+++ b/zk/txpool/pool_xlayer.go
@@ -3,7 +3,12 @@ package txpool
 import (
 	"math/big"
 
+	mapset "github.com/deckarep/golang-set/v2"
 	"github.com/gateway-fm/cdk-erigon-lib/common"
+	"github.com/gateway-fm/cdk-erigon-lib/common/fixedgas"
+	"github.com/gateway-fm/cdk-erigon-lib/kv"
+	"github.com/gateway-fm/cdk-erigon-lib/types"
+	"github.com/ledgerwatch/log/v3"
 )
 
 // XLayerConfig contains the X Layer configs for the txpool
@@ -26,6 +31,12 @@ type XLayerConfig struct {
 	FreeGasCountPerAddr uint64
 	// FreeGasLimit is the max gas allowed use to do a free gas tx
 	FreeGasLimit uint64
+	// OkPayAccountList is the list of OK Pay bundler account addresses
+	OkPayAccountList []string
+	// OkPayGasLimitPerBlock is the maximum gas per block that OK Pay transactions may use
+	OkPayGasLimitPerBlock uint64
+	// OkPayCounterLimitPercentage is the percentage of the batch counter limit available to OK Pay transactions
+	OkPayCounterLimitPercentage uint
 }
 
 type GPCache interface {
@@ -81,3 +92,81 @@ func (p *TxPool) isFreeGasXLayer(senderID uint64) bool {
 	free, _ := p.checkFreeGasAddrXLayer(senderID)
 	return free
 }
+
+func (p *TxPool) IsOkPayAddr(addr common.Address) bool {
+	for _, e := range p.xlayerCfg.OkPayAccountList {
+		if common.HexToAddress(e) == addr {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *TxPool) bestOkPay(n uint16, txs *types.TxsRlp, tx kv.Tx, isLondon, isShanghai bool, availableGas uint64, toSkip mapset.Set[[32]byte]) (uint64, int, []*metaTx, error) {
+	var toRemove []*metaTx
+	best := p.pending.best
+	count := 0
+
+	for i := 0; count < int(n) && i < len(best.ms); i++ {
+		// if we wouldn't have enough gas for a standard transaction then quit out early
+		if availableGas < fixedgas.TxGas {
+			break
+		}
+
+		mt := best.ms[i]
+
+		if toSkip.Contains(mt.Tx.IDHash) {
+			continue
+		}
+
+		if !isLondon && mt.Tx.Type == 0x2 {
+			// remove london txs when not in london
+			toRemove = append(toRemove, mt)
+			toSkip.Add(mt.Tx.IDHash)
+			continue
+		}
+
+		if mt.Tx.Gas >= transactionGasLimit {
+			// Skip transactions with very large gas limit, these shouldn't enter the pool at all
+			log.Debug("found a transaction in the pending pool with too high gas for tx - clear the tx pool")
+			continue
+		}
+		rlpTx, sender, isLocal, err := p.getRlpLocked(tx, mt.Tx.IDHash[:])
+		if err != nil {
+			return availableGas, count, toRemove, err
+		}
+		if len(rlpTx) == 0 {
+			toRemove = append(toRemove, mt)
+			continue
+		}
+
+		if !p.IsOkPayAddr(sender) {
+			continue
+		}
+
+		// make sure we have enough gas in the caller to add this transaction.
+		// not an exact science using intrinsic gas but as close as we could hope for at
+		// this stage
+		intrinsicGas, _ := CalcIntrinsicGas(uint64(mt.Tx.DataLen), uint64(mt.Tx.DataNonZeroLen), nil, mt.Tx.Creation, true, true, isShanghai)
+		if intrinsicGas > availableGas {
+			// we might find another TX with a low enough intrinsic gas to include so carry on
+			continue
+		}
+
+		if intrinsicGas <= availableGas { // check for potential underflow
+			availableGas -= intrinsicGas
+		}
+
+		txs.Txs[count] = rlpTx
+		copy(txs.Senders.At(count), sender.Bytes())
+		txs.IsLocal[count] = isLocal
+		toSkip.Add(mt.Tx.IDHash)
+		count++
+	}
+
+	return availableGas, count, toRemove, nil
+}
+
+func (p *TxPool) OkPayCounterLimitPercentage() uint {
+	return p.xlayerCfg.OkPayCounterLimitPercentage
+}
diff --git a/zk/txpool/pool_zk.go b/zk/txpool/pool_zk.go
index b29f08925c8..cbd5c74c5d7 100644
--- a/zk/txpool/pool_zk.go
+++ b/zk/txpool/pool_zk.go
@@ -201,7 +201,7 @@ func (p *TxPool) onSenderStateChange(senderID uint64, senderNonce uint64, sender
 
 // zk: the implementation of best here is changed only to not take into account block gas limits as we don't care about
 // these in zk. Instead we do a quick check on the transaction maximum gas in zk
-func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas uint64, toSkip mapset.Set[[32]byte]) (bool, int, error) {
+func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas uint64, toSkip mapset.Set[[32]byte], okPayPriority bool) (bool, int, error) {
 	p.lock.Lock()
 	defer p.lock.Unlock()
 
@@ -225,6 +225,23 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG
 
 	p.pending.EnforceBestInvariants()
 
+	// For X Layer
+	if okPayPriority {
+		okPayTxAvailableGas := p.xlayerCfg.OkPayGasLimitPerBlock
+		if okPayTxAvailableGas > availableGas {
+			okPayTxAvailableGas = availableGas
+		}
+		okPayTxGasRemain, priorityTxCount, okPayTxRemove, err := p.bestOkPay(n, txs, tx, isLondon, isShanghai, okPayTxAvailableGas, toSkip)
+		if err != nil {
+			return false, priorityTxCount, err
+		}
+		availableGas = availableGas - okPayTxAvailableGas + okPayTxGasRemain
+		if len(okPayTxRemove) > 0 {
+			toRemove = append(toRemove, okPayTxRemove...)
+		}
+		count += priorityTxCount
+	}
+
 	for i := 0; count < int(n) && i < len(best.ms); i++ {
 		// if we wouldn't have enough gas for a standard transaction then quit out early
 		if availableGas < fixedgas.TxGas {