
Commit

Merge branch 'master' into latest-tipping-tx
magicxyyz committed Oct 31, 2023
2 parents e4d6af3 + 4bdccdd commit ce5b737
Showing 12 changed files with 63 additions and 19 deletions.
8 changes: 7 additions & 1 deletion arbitrum/apibackend.go
@@ -504,7 +504,13 @@ func (a *APIBackend) GetEVM(ctx context.Context, msg *core.Message, state *state
vmConfig = a.BlockChain().GetVMConfig()
}
txContext := core.NewEVMTxContext(msg)
- return vm.NewEVM(*blockCtx, txContext, state, a.BlockChain().Config(), *vmConfig), vmError
+ var context vm.BlockContext
+ if blockCtx != nil {
+     context = *blockCtx
+ } else {
+     context = core.NewEVMBlockContext(header, a.BlockChain(), nil)
+ }
+ return vm.NewEVM(context, txContext, state, a.BlockChain().Config(), *vmConfig), vmError
}

func (a *APIBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
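The GetEVM change above makes the block-context override optional: a nil blockCtx now falls back to core.NewEVMBlockContext built from the header instead of dereferencing a nil pointer. A minimal sketch of a caller relying on that fallback; the surrounding variables (backend, msg, statedb, header) are assumed to be prepared elsewhere and are not part of this commit:

```go
// Sketch only: backend is the arbitrum APIBackend shown above; ctx, msg, statedb
// and header come from the caller. Passing nil uses the new header-derived context.
evm, vmError := backend.GetEVM(ctx, msg, statedb, header, &vm.Config{NoBaseFee: true}, nil)
_, _ = evm, vmError
```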
2 changes: 1 addition & 1 deletion cmd/evm/internal/t8ntool/transaction.go
@@ -172,7 +172,7 @@ func Transaction(ctx *cli.Context) error {
r.Error = errors.New("gas * maxFeePerGas exceeds 256 bits")
}
// Check whether the init code size has been exceeded.
- if chainConfig.IsShanghai(new(big.Int), 0, 0) && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
+ if chainConfig.IsShanghai(new(big.Int), 0, 0) && tx.To() == nil && len(tx.Data()) > int(chainConfig.MaxInitCodeSize()) {
r.Error = errors.New("max initcode size exceeded")
}
results = append(results, r)
16 changes: 9 additions & 7 deletions core/blockchain.go
@@ -1039,15 +1039,15 @@ func (bc *BlockChain) Stop() {
triedb := bc.triedb

for _, offset := range []uint64{0, 1, bc.cacheConfig.TriesInMemory - 1, math.MaxUint64} {
- if number := bc.CurrentBlock().Number.Uint64(); number > offset {
+ if number := bc.CurrentBlock().Number.Uint64(); number > offset || offset == math.MaxUint64 {
var recent *types.Block
- if offset == math.MaxUint {
+ if offset == math.MaxUint64 && !bc.triegc.Empty() {
_, latest := bc.triegc.Peek()
recent = bc.GetBlockByNumber(uint64(-latest))
} else {
recent = bc.GetBlockByNumber(number - offset)
}
- if recent.Root() == (common.Hash{}) {
+ if recent == nil || recent.Root() == (common.Hash{}) {
continue
}

@@ -1451,7 +1451,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
// If MaxNumberOfBlocksToSkipStateSaving or MaxAmountOfGasToSkipStateSaving is not zero, then flushing of some blocks will be skipped:
// * at most MaxNumberOfBlocksToSkipStateSaving block state commits will be skipped
// * sum of gas used in skipped blocks will be at most MaxAmountOfGasToSkipStateSaving
- if bc.cacheConfig.TrieDirtyDisabled {
+ archiveNode := bc.cacheConfig.TrieDirtyDisabled
+ if archiveNode {
var maySkipCommiting, blockLimitReached, gasLimitReached bool
if bc.cacheConfig.MaxNumberOfBlocksToSkipStateSaving != 0 {
maySkipCommiting = true
@@ -1474,10 +1475,10 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
bc.amountOfGasInBlocksToSkipStateSaving = bc.cacheConfig.MaxAmountOfGasToSkipStateSaving
return bc.triedb.Commit(root, false)
}
- return nil
+ // we are skipping saving the trie to diskdb, so we need to keep the trie in memory and garbage collect it later
}

- // Full but not archive node, do proper garbage collection
+ // Full node or archive node that's not keeping all states, do proper garbage collection
bc.triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
bc.triegc.Push(trieGcEntry{root, block.Header().Time}, -int64(block.NumberU64()))

@@ -1510,7 +1511,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
}
flushInterval := time.Duration(bc.flushInterval.Load())
// If we exceeded out time allowance, flush an entire trie to disk
- if bc.gcproc > flushInterval && prevEntry != nil {
+ // In case of archive node that skips some trie commits we don't flush tries here
+ if bc.gcproc > flushInterval && prevEntry != nil && !archiveNode {
// If the header is missing (canonical chain behind), we're reorging a low
// diff sidechain. Suspend committing until this operation is completed.
header := bc.GetHeaderByNumber(prevNum)
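The comment block in the writeBlockWithState hunk describes two bounds on how long an archive node may defer state commits: a block count and an accumulated-gas budget. A minimal sketch of setting those knobs, assuming the fork's CacheConfig fields named in the diff; the concrete values are purely illustrative:

```go
package main

import "github.com/ethereum/go-ethereum/core"

func main() {
	// Sketch only: field names follow the hunks above, values are illustrative.
	cacheConf := &core.CacheConfig{
		TrieDirtyDisabled:                  true,        // archive mode, as checked via archiveNode above
		MaxNumberOfBlocksToSkipStateSaving: 100,         // commit state at least every 101st block
		MaxAmountOfGasToSkipStateSaving:    300_000_000, // ...or sooner, once skipped blocks use this much gas
	}
	_ = cacheConf
}
```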
4 changes: 2 additions & 2 deletions core/state_transition.go
@@ -408,8 +408,8 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
}

// Check whether the init code size has been exceeded.
- if rules.IsShanghai && contractCreation && len(msg.Data) > params.MaxInitCodeSize {
-     return nil, fmt.Errorf("%w: code size %v limit %v", ErrMaxInitCodeSizeExceeded, len(msg.Data), params.MaxInitCodeSize)
+ if rules.IsShanghai && contractCreation && len(msg.Data) > int(st.evm.ChainConfig().MaxInitCodeSize()) {
+     return nil, fmt.Errorf("%w: code size %v limit %v", ErrMaxInitCodeSizeExceeded, len(msg.Data), int(st.evm.ChainConfig().MaxInitCodeSize()))
}

// Execute the preparatory steps for state transition which includes:
4 changes: 2 additions & 2 deletions core/txpool/txpool.go
@@ -615,8 +615,8 @@ func (pool *TxPool) validateTxBasics(tx *types.Transaction, local bool) error {
return ErrOversizedData
}
// Check whether the init code size has been exceeded.
- if pool.shanghai.Load() && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
-     return fmt.Errorf("%w: code size %v limit %v", core.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize)
+ if pool.shanghai.Load() && tx.To() == nil && len(tx.Data()) > int(pool.chainconfig.MaxInitCodeSize()) {
+     return fmt.Errorf("%w: code size %v limit %v", core.ErrMaxInitCodeSizeExceeded, len(tx.Data()), int(pool.chainconfig.MaxInitCodeSize()))
}
// Transactions can't be negative. This may never happen using RLP decoded
// transactions but may occur if you create a transaction using the RPC.
2 changes: 1 addition & 1 deletion core/vm/evm.go
@@ -504,7 +504,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
ret, err := evm.interpreter.Run(contract, nil, false)

// Check whether the max code size has been exceeded, assign err if the case.
- if err == nil && evm.chainRules.IsEIP158 && len(ret) > params.MaxCodeSize {
+ if err == nil && evm.chainRules.IsEIP158 && len(ret) > int(evm.chainConfig.MaxCodeSize()) {
err = ErrMaxCodeSizeExceeded
}

8 changes: 4 additions & 4 deletions core/vm/gas_table.go
@@ -308,10 +308,10 @@ func gasCreateEip3860(evm *EVM, contract *Contract, stack *Stack, mem *Memory, m
return 0, err
}
size, overflow := stack.Back(2).Uint64WithOverflow()
- if overflow || size > params.MaxInitCodeSize {
+ if overflow || size > evm.chainConfig.MaxInitCodeSize() {
return 0, ErrGasUintOverflow
}
- // Since size <= params.MaxInitCodeSize, these multiplication cannot overflow
+ // Since size <= evm.chainConfig.MaxInitCodeSize(), these multiplication cannot overflow
moreGas := params.InitCodeWordGas * ((size + 31) / 32)
if gas, overflow = math.SafeAdd(gas, moreGas); overflow {
return 0, ErrGasUintOverflow
@@ -324,10 +324,10 @@ func gasCreate2Eip3860(evm *EVM, contract *Contract, stack *Stack, mem *Memory,
return 0, err
}
size, overflow := stack.Back(2).Uint64WithOverflow()
- if overflow || size > params.MaxInitCodeSize {
+ if overflow || size > evm.chainConfig.MaxInitCodeSize() {
return 0, ErrGasUintOverflow
}
- // Since size <= params.MaxInitCodeSize, these multiplication cannot overflow
+ // Since size <= evm.chainConfig.MaxInitCodeSize(), these multiplication cannot overflow
moreGas := (params.InitCodeWordGas + params.Keccak256WordGas) * ((size + 31) / 32)
if gas, overflow = math.SafeAdd(gas, moreGas); overflow {
return 0, ErrGasUintOverflow
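Both hunks keep the EIP-3860 word-gas formula and only swap the size cap for the configurable one, so the "cannot overflow" comments still hold. A worked example of the extra charge using geth's constants (params.InitCodeWordGas = 2, params.Keccak256WordGas = 6); the 49152-byte size is just the default, un-overridden limit used for illustration:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	size := uint64(params.MaxInitCodeSize) // 49152 bytes under the default limit
	words := (size + 31) / 32              // 1536 32-byte words, rounded up

	fmt.Println(params.InitCodeWordGas * words)                             // CREATE:  3072 extra gas
	fmt.Println((params.InitCodeWordGas + params.Keccak256WordGas) * words) // CREATE2: 12288 extra gas
}
```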
2 changes: 2 additions & 0 deletions eth/state_accessor.go
@@ -85,6 +85,7 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe
// Create an ephemeral trie.Database for isolating the live one. Otherwise
// the internal junks created by tracing will be persisted into the disk.
database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16})
+ defer database.TrieDB().ResetCleans()
if statedb, err = state.New(block.Root(), database, nil); err == nil {
log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number())
return statedb, noopReleaser, nil
@@ -100,6 +101,7 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe
// Create an ephemeral trie.Database for isolating the live one. Otherwise
// the internal junks created by tracing will be persisted into the disk.
database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16})
+ defer database.TrieDB().ResetCleans()

// If we didn't check the live database, do check state over ephemeral database,
// otherwise we would rewind past a persisted block (specific corner case is
1 change: 1 addition & 0 deletions internal/ethapi/api.go
@@ -1068,6 +1068,7 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash
if err != nil {
return nil, err
}
+ msg.TxRunMode = runMode
// make a new EVM for the scheduled Tx (an EVM must never be reused)
evm, vmError := b.GetEVM(ctx, msg, state, header, &vm.Config{NoBaseFee: true}, &blockCtx)
go func() {
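The one-line addition above stamps the caller's run mode onto the message before the EVM is built. A hedged sketch of what setting that field looks like; the constant name core.MessageEthcallMode is my assumption about the fork's run-mode enum and does not appear in this diff:

```go
// Assumption: the fork exposes run-mode constants such as core.MessageEthcallMode;
// only the msg.TxRunMode field itself is shown in the hunk above.
msg.TxRunMode = core.MessageEthcallMode // treat the call as a read-only eth_call
```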
16 changes: 16 additions & 0 deletions params/config_arbitrum.go
@@ -29,6 +29,8 @@ type ArbitrumChainParams struct {
InitialArbOSVersion uint64
InitialChainOwner common.Address
GenesisBlockNum uint64
+ MaxCodeSize     uint64 `json:"MaxCodeSize,omitempty"`     // Maximum bytecode to permit for a contract. 0 value implies params.MaxCodeSize
+ MaxInitCodeSize uint64 `json:"MaxInitCodeSize,omitempty"` // Maximum initcode to permit in a creation transaction and create instructions. 0 value implies params.MaxInitCodeSize
}

func (c *ChainConfig) IsArbitrum() bool {
@@ -39,6 +41,20 @@ func (c *ChainConfig) IsArbitrumNitro(num *big.Int) bool {
return c.IsArbitrum() && isBlockForked(new(big.Int).SetUint64(c.ArbitrumChainParams.GenesisBlockNum), num)
}

+ func (c *ChainConfig) MaxCodeSize() uint64 {
+     if c.ArbitrumChainParams.MaxCodeSize == 0 {
+         return MaxCodeSize
+     }
+     return c.ArbitrumChainParams.MaxCodeSize
+ }

+ func (c *ChainConfig) MaxInitCodeSize() uint64 {
+     if c.ArbitrumChainParams.MaxInitCodeSize == 0 {
+         return c.MaxCodeSize() * 2
+     }
+     return c.ArbitrumChainParams.MaxInitCodeSize
+ }

func (c *ChainConfig) DebugMode() bool {
return c.ArbitrumChainParams.AllowDebugPrecompiles
}
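Both accessors treat a zero override as "use the protocol default", so chains that never set the new ArbitrumChainParams fields keep the EIP-170/EIP-3860 limits. A small example of how the values resolve; the ChainConfig literal is hypothetical and only the accessor semantics come from this diff:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	cfg := &params.ChainConfig{} // no Arbitrum overrides set

	fmt.Println(cfg.MaxCodeSize())     // 24576  (params.MaxCodeSize, because the override is 0)
	fmt.Println(cfg.MaxInitCodeSize()) // 49152  (derived as 2 * MaxCodeSize)

	cfg.ArbitrumChainParams.MaxCodeSize = 96 * 1024
	fmt.Println(cfg.MaxCodeSize())     // 98304  (explicit override wins)
	fmt.Println(cfg.MaxInitCodeSize()) // 196608 (still 2 * the effective MaxCodeSize)
}
```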
8 changes: 8 additions & 0 deletions trie/database_wrap.go
@@ -84,6 +84,7 @@ func prepare(diskdb ethdb.Database, config *Config) *Database {
} else {
cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024)
}
+ runtime.SetFinalizer(cleans, func(c *fastcache.Cache) { c.Reset() })
}
var preimages *preimageStore
if config != nil && config.Preimages {
@@ -97,6 +98,13 @@ func prepare(diskdb ethdb.Database, config *Config) *Database {
}
}

+ // resets fastcache to return memory chunks to pool of free chunks
+ func (db *Database) ResetCleans() {
+     if db.cleans != nil {
+         db.cleans.Reset()
+     }
+ }

// NewDatabase initializes the trie database with default settings, namely
// the legacy hash-based scheme is used by default.
func NewDatabase(diskdb ethdb.Database) *Database {
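ResetCleans gives callers of short-lived trie databases a way to hand the fastcache memory back to the shared free-chunk pool immediately, while the SetFinalizer hook in prepare covers databases that are simply dropped. A minimal sketch mirroring the eth/state_accessor.go hunks above; diskdb stands for whatever ethdb.Database the caller already holds:

```go
// Sketch of the intended usage pattern, as in eth/state_accessor.go above.
database := state.NewDatabaseWithConfig(diskdb, &trie.Config{Cache: 16}) // ephemeral 16 MiB clean cache
defer database.TrieDB().ResetCleans()                                    // return the cache chunks when done
```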
11 changes: 10 additions & 1 deletion trie/triedb/hashdb/database.go
@@ -329,20 +329,25 @@ func (db *Database) Cap(limit common.StorageSize) error {
// outside code doesn't see an inconsistent state (referenced data removed from
// memory cache during commit but not yet in persistent storage). This is ensured
// by only uncaching existing data when the database write finalizes.
- nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
+ start := time.Now()
batch := db.diskdb.NewBatch()
+ db.lock.RLock()
+ nodes, storage := len(db.dirties), db.dirtiesSize

// db.dirtiesSize only contains the useful data in the cache, but when reporting
// the total memory consumption, the maintenance metadata is also needed to be
// counted.
size := db.dirtiesSize + common.StorageSize(len(db.dirties)*cachedNodeSize)
size += db.childrenSize
+ db.lock.RUnlock()

// Keep committing nodes from the flush-list until we're below allowance
oldest := db.oldest
for size > limit && oldest != (common.Hash{}) {
// Fetch the oldest referenced node and push into the batch
+ db.lock.RLock()
node := db.dirties[oldest]
+ db.lock.RUnlock()
rawdb.WriteLegacyTrieNode(batch, oldest, node.node)

// If we exceeded the ideal batch size, commit and reset
@@ -418,7 +423,9 @@ func (db *Database) Commit(node common.Hash, report bool) error {
batch := db.diskdb.NewBatch()

// Move the trie itself into the batch, flushing if enough data is accumulated
+ db.lock.RLock()
nodes, storage := len(db.dirties), db.dirtiesSize
+ db.lock.RUnlock()

uncacher := &cleaner{db}
if err := db.commit(node, batch, uncacher); err != nil {
@@ -460,7 +467,9 @@ func (db *Database) Commit(node common.Hash, report bool) error {
// commit is the private locked version of Commit.
func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
// If the node does not exist, it's a previously committed node
+ db.lock.RLock()
node, ok := db.dirties[hash]
+ db.lock.RUnlock()
if !ok {
return nil
}
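The Cap and Commit hunks wrap every read of db.dirties and its size counters in the read lock, since those reads can now race with writers mutating the dirty set. A generic, self-contained sketch of that guarded-read pattern, not tied to this file's types:

```go
package main

import "sync"

// nodeCache mimics the shape of the protected state: readers take RLock,
// writers (not shown) take the full Lock, matching the hunks above.
type nodeCache struct {
	mu    sync.RWMutex
	nodes map[string][]byte
}

func (c *nodeCache) get(key string) ([]byte, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.nodes[key]
	return v, ok
}

func main() {
	c := &nodeCache{nodes: map[string][]byte{"root": {0x01}}}
	_, _ = c.get("root")
}
```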
