diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 8f622fdb2d..d49139b1bf 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -5,27 +5,4 @@
 accounts/usbwallet @karalabe
 consensus @karalabe
 core/ @karalabe @holiman
 eth/ @karalabe
-les/ @zsfelfoldi
-light/ @zsfelfoldi
-mobile/ @karalabe
 p2p/ @fjl @zsfelfoldi
-swarm/bmt @zelig
-swarm/dev @lmars
-swarm/fuse @jmozah @holisticode
-swarm/grafana_dashboards @nonsense
-swarm/metrics @nonsense @holisticode
-swarm/multihash @nolash
-swarm/network/bitvector @zelig @janos @gbalint
-swarm/network/priorityqueue @zelig @janos @gbalint
-swarm/network/simulations @zelig
-swarm/network/stream @janos @zelig @gbalint @holisticode @justelad
-swarm/network/stream/intervals @janos
-swarm/network/stream/testing @zelig
-swarm/pot @zelig
-swarm/pss @nolash @zelig @nonsense
-swarm/services @zelig
-swarm/state @justelad
-swarm/storage/encryption @gbalint @zelig @nagydani
-swarm/storage/mock @janos
-swarm/storage/mru @nolash
-swarm/testutil @lmars
diff --git a/Makefile b/Makefile
index ad901a7401..2945abf25c 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 # with Go source code. If you know what GOPATH is then you probably
 # don't need to bother with make.
 
-.PHONY: geth android ios evm all test clean
+.PHONY: platon evm all test clean
 
 GOBIN = $(shell pwd)/build/bin
 GO ?= latest
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 804ab9b770..c19bf6d948 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -833,6 +833,13 @@ func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*t
 	return fb.bc.GetHeaderByHash(hash), nil
 }
 
+func (fb *filterBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
+	if body := fb.bc.GetBody(hash); body != nil {
+		return body, nil
+	}
+	return nil, errors.New("block body not found")
+}
+
 func (fb *filterBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
 	return fb.backend.pendingBlock, fb.backend.pendingReceipts
 }
diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go
index d48763a3f4..f53e682df7 100644
--- a/accounts/abi/bind/bind.go
+++ b/accounts/abi/bind/bind.go
@@ -23,7 +23,6 @@ package bind
 
 import (
 	"bytes"
-	"errors"
 	"fmt"
 	"go/format"
 	"regexp"
@@ -40,8 +39,6 @@ type Lang int
 
 const (
 	LangGo Lang = iota
-	LangJava
-	LangObjC
 )
 
 func isKeyWord(arg string) bool {
@@ -222,10 +219,6 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 		if evmABI.HasReceive() {
 			receive = &tmplMethod{Original: evmABI.Receive}
 		}
-		// There is no easy way to pass arbitrary java objects to the Go side.
-		if len(structs) > 0 && lang == LangJava {
-			return "", errors.New("java binding for tuple arguments is not supported yet")
-		}
 
 		contracts[types[i]] = &tmplContract{
 			Type:        capitalise(types[i]),
@@ -299,8 +292,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 // bindType is a set of type binders that convert Solidity types to some supported
 // programming language types.
 var bindType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{
-	LangGo:   bindTypeGo,
-	LangJava: bindTypeJava,
+	LangGo: bindTypeGo,
 }
 
 // bindBasicTypeGo converts basic solidity types(except array, slice and tuple) to Go ones.
@@ -343,86 +335,10 @@ func bindTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 	}
 }
 
-// bindBasicTypeJava converts basic solidity types(except array, slice and tuple) to Java ones.
-func bindBasicTypeJava(kind abi.Type) string {
-	switch kind.T {
-	case abi.AddressTy:
-		return "Address"
-	case abi.IntTy, abi.UintTy:
-		// Note that uint and int (without digits) are also matched,
-		// these are size 256, and will translate to BigInt (the default).
-		parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(kind.String())
-		if len(parts) != 3 {
-			return kind.String()
-		}
-		// All unsigned integers should be translated to BigInt since gomobile doesn't
-		// support them.
-		if parts[1] == "u" {
-			return "BigInt"
-		}
-
-		namedSize := map[string]string{
-			"8":  "byte",
-			"16": "short",
-			"32": "int",
-			"64": "long",
-		}[parts[2]]
-
-		// default to BigInt
-		if namedSize == "" {
-			namedSize = "BigInt"
-		}
-		return namedSize
-	case abi.FixedBytesTy, abi.BytesTy:
-		return "byte[]"
-	case abi.BoolTy:
-		return "boolean"
-	case abi.StringTy:
-		return "String"
-	case abi.FunctionTy:
-		return "byte[24]"
-	default:
-		return kind.String()
-	}
-}
-
-// pluralizeJavaType explicitly converts multidimensional types to predefined
-// types in go side.
-func pluralizeJavaType(typ string) string {
-	switch typ {
-	case "boolean":
-		return "Bools"
-	case "String":
-		return "Strings"
-	case "Address":
-		return "Addresses"
-	case "byte[]":
-		return "Binaries"
-	case "BigInt":
-		return "BigInts"
-	}
-	return typ + "[]"
-}
-
-// bindTypeJava converts a Solidity type to a Java one. Since there is no clear mapping
-// from all Solidity types to Java ones (e.g. uint17), those that cannot be exactly
-// mapped will use an upscaled type (e.g. BigDecimal).
-func bindTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
-	switch kind.T {
-	case abi.TupleTy:
-		return structs[kind.TupleRawName+kind.String()].Name
-	case abi.ArrayTy, abi.SliceTy:
-		return pluralizeJavaType(bindTypeJava(*kind.Elem, structs))
-	default:
-		return bindBasicTypeJava(kind)
-	}
-}
-
 // bindTopicType is a set of type binders that convert Solidity types to some
 // supported programming language topic types.
 var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{
-	LangGo:   bindTopicTypeGo,
-	LangJava: bindTopicTypeJava,
+	LangGo: bindTopicTypeGo,
 }
 
 // bindTopicTypeGo converts a Solidity topic type to a Go one. It is almost the same
@@ -442,28 +358,10 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 	return bound
 }
 
-// bindTopicTypeJava converts a Solidity topic type to a Java one. It is almost the same
-// functionality as for simple types, but dynamic types get converted to hashes.
-func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
-	bound := bindTypeJava(kind, structs)
-
-	// todo(rjl493456442) according solidity documentation, indexed event
-	// parameters that are not value types i.e. arrays and structs are not
-	// stored directly but instead a keccak256-hash of an encoding is stored.
-	//
-	// We only convert strings and bytes to hash, still need to deal with
-	// array(both fixed-size and dynamic-size) and struct.
-	if bound == "String" || bound == "byte[]" {
-		bound = "Hash"
-	}
-	return bound
-}
-
 // bindStructType is a set of type binders that convert Solidity tuple types to some supported
 // programming language struct definition.
 var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{
-	LangGo:   bindStructTypeGo,
-	LangJava: bindStructTypeJava,
+	LangGo: bindStructTypeGo,
 }
 
 // bindStructTypeGo converts a Solidity tuple type to a Go one and records the mapping
@@ -512,74 +410,10 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 	}
 }
 
-// bindStructTypeJava converts a Solidity tuple type to a Java one and records the mapping
-// in the given map.
-// Notably, this function will resolve and record nested struct recursively.
-func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
-	switch kind.T {
-	case abi.TupleTy:
-		// We compose a raw struct name and a canonical parameter expression
-		// together here. The reason is before solidity v0.5.11, kind.TupleRawName
-		// is empty, so we use canonical parameter expression to distinguish
-		// different struct definition. From the consideration of backward
-		// compatibility, we concat these two together so that if kind.TupleRawName
-		// is not empty, it can have unique id.
-		id := kind.TupleRawName + kind.String()
-		if s, exist := structs[id]; exist {
-			return s.Name
-		}
-		var fields []*tmplField
-		for i, elem := range kind.TupleElems {
-			field := bindStructTypeJava(*elem, structs)
-			fields = append(fields, &tmplField{Type: field, Name: decapitalise(kind.TupleRawNames[i]), SolKind: *elem})
-		}
-		name := kind.TupleRawName
-		if name == "" {
-			name = fmt.Sprintf("Class%d", len(structs))
-		}
-		structs[id] = &tmplStruct{
-			Name:   name,
-			Fields: fields,
-		}
-		return name
-	case abi.ArrayTy, abi.SliceTy:
-		return pluralizeJavaType(bindStructTypeJava(*kind.Elem, structs))
-	default:
-		return bindBasicTypeJava(kind)
-	}
-}
-
 // namedType is a set of functions that transform language specific types to
 // named versions that may be used inside method names.
 var namedType = map[Lang]func(string, abi.Type) string{
-	LangGo:   func(string, abi.Type) string { panic("this shouldn't be needed") },
-	LangJava: namedTypeJava,
-}
-
-// namedTypeJava converts some primitive data types to named variants that can
-// be used as parts of method names.
-func namedTypeJava(javaKind string, solKind abi.Type) string {
-	switch javaKind {
-	case "byte[]":
-		return "Binary"
-	case "boolean":
-		return "Bool"
-	default:
-		parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(solKind.String())
-		if len(parts) != 4 {
-			return javaKind
-		}
-		switch parts[2] {
-		case "8", "16", "32", "64":
-			if parts[3] == "" {
-				return capitalise(fmt.Sprintf("%sint%s", parts[1], parts[2]))
-			}
-			return capitalise(fmt.Sprintf("%sint%ss", parts[1], parts[2]))
-
-		default:
-			return javaKind
-		}
-	}
+	LangGo: func(string, abi.Type) string { panic("this shouldn't be needed") },
 }
 
 // alias returns an alias of the given string based on the aliasing rules
@@ -594,8 +428,7 @@ func alias(aliases map[string]string, n string) string {
 // methodNormalizer is a name transformer that modifies Solidity method names to
 // conform to target language naming conventions.
 var methodNormalizer = map[Lang]func(string) string{
-	LangGo:   abi.ToCamelCase,
-	LangJava: decapitalise,
+	LangGo: abi.ToCamelCase,
 }
 
 // capitalise makes a camel-case string which starts with an upper case character.
diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go
index a2a6d77e5a..a027aab00f 100644
--- a/accounts/abi/bind/bind_test.go
+++ b/accounts/abi/bind/bind_test.go
@@ -1989,409 +1989,3 @@ func TestGolangBindings(t *testing.T) {
 		t.Fatalf("failed to run binding test: %v\n%s", err, out)
 	}
 }
-
-// Tests that java binding generated by the binder is exactly matched.
-/*func TestJavaBindings(t *testing.T) {
-	var cases = []struct {
-		name     string
-		contract string
-		abi      string
-		bytecode string
-		expected string
-	}{
-		{
-			"test",
-			`
-			pragma experimental ABIEncoderV2;
-			pragma solidity ^0.5.2;
-
-			contract test {
-				function setAddress(address a) public returns(address){}
-				function setAddressList(address[] memory a_l) public returns(address[] memory){}
-				function setAddressArray(address[2] memory a_a) public returns(address[2] memory){}
-
-				function setUint8(uint8 u8) public returns(uint8){}
-				function setUint16(uint16 u16) public returns(uint16){}
-				function setUint32(uint32 u32) public returns(uint32){}
-				function setUint64(uint64 u64) public returns(uint64){}
-				function setUint256(uint256 u256) public returns(uint256){}
-				function setUint256List(uint256[] memory u256_l) public returns(uint256[] memory){}
-				function setUint256Array(uint256[2] memory u256_a) public returns(uint256[2] memory){}
-
-				function setInt8(int8 i8) public returns(int8){}
-				function setInt16(int16 i16) public returns(int16){}
-				function setInt32(int32 i32) public returns(int32){}
-				function setInt64(int64 i64) public returns(int64){}
-				function setInt256(int256 i256) public returns(int256){}
-				function setInt256List(int256[] memory i256_l) public returns(int256[] memory){}
-				function setInt256Array(int256[2] memory i256_a) public returns(int256[2] memory){}
-
-				function setBytes1(bytes1 b1) public returns(bytes1) {}
-				function setBytes32(bytes32 b32) public returns(bytes32) {}
-				function setBytes(bytes memory bs) public returns(bytes memory) {}
-				function setBytesList(bytes[] memory bs_l) public returns(bytes[] memory) {}
-				function setBytesArray(bytes[2] memory bs_a) public returns(bytes[2] memory) {}
-
-				function setString(string memory s) public returns(string memory) {}
-				function setStringList(string[] memory s_l) public returns(string[] memory) {}
-				function setStringArray(string[2] memory s_a) public returns(string[2] memory) {}
-
-				function setBool(bool b) public returns(bool) {}
-				function setBoolList(bool[] memory b_l) public returns(bool[] memory) {}
-				function setBoolArray(bool[2] memory b_a) public returns(bool[2] memory) {}
-			}`,
-
`[{"constant":false,"inputs":[{"name":"u16","type":"uint16"}],"name":"setUint16","outputs":[{"name":"","type":"uint16"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b_a","type":"bool[2]"}],"name":"setBoolArray","outputs":[{"name":"","type":"bool[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"a_a","type":"address[2]"}],"name":"setAddressArray","outputs":[{"name":"","type":"address[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"bs_l","type":"bytes[]"}],"name":"setBytesList","outputs":[{"name":"","type":"bytes[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u8","type":"uint8"}],"name":"setUint8","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u32","type":"uint32"}],"name":"setUint32","outputs":[{"name":"","type":"uint32"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b","type":"bool"}],"name":"setBool","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i256_l","type":"int256[]"}],"name":"setInt256List","outputs":[{"name":"","type":"int256[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u256_a","type":"uint256[2]"}],"name":"setUint256Array","outputs":[{"name":"","type":"uint256[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b_l","type":"bool[]"}],"name":"setBoolList","outputs":[{"name":"","type":"bool[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"bs_a","type":"bytes[2]"}],"name":"setBytesArray","outputs":[{"name":"","type":"bytes[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"a_l","type":"address[]"}],"name":"setAddressList","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i256_a","type":"int256[2]"}],"name":"setInt256Array","outputs":[{"name":"","type":"int256[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"s_a","type":"string[2]"}],"name":"setStringArray","outputs":[{"name":"","type":"string[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"s","type":"string"}],"name":"setString","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u64","type":"uint64"}],"name":"setUint64","outputs":[{"name":"","type":"uint64"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i16","type":"int16"}],"name":"setInt16","outputs":[{"name":"","type":"int16"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i8","type":"int8"}],"name":"setInt8","outputs":[{"name":"","type":"int8"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u256_l","type":"uint256[]"}],"name":"setUint256List","outputs":[{"name":"","type":"uint256[]"}],"payable":fals
e,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i256","type":"int256"}],"name":"setInt256","outputs":[{"name":"","type":"int256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i32","type":"int32"}],"name":"setInt32","outputs":[{"name":"","type":"int32"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b32","type":"bytes32"}],"name":"setBytes32","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"s_l","type":"string[]"}],"name":"setStringList","outputs":[{"name":"","type":"string[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u256","type":"uint256"}],"name":"setUint256","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"bs","type":"bytes"}],"name":"setBytes","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"a","type":"address"}],"name":"setAddress","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i64","type":"int64"}],"name":"setInt64","outputs":[{"name":"","type":"int64"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b1","type":"bytes1"}],"name":"setBytes1","outputs":[{"name":"","type":"bytes1"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]`, - `608060405234801561001057600080fd5b5061265a806100206000396000f3fe608060405234801561001057600080fd5b50600436106101e1576000357c0100000000000000000000000000000000000000000000000000000000900480637fcaf66611610116578063c2b12a73116100b4578063da359dc81161008e578063da359dc814610666578063e30081a014610696578063e673eb32146106c6578063fba1a1c3146106f6576101e1565b8063c2b12a73146105d6578063c577796114610606578063d2282dc514610636576101e1565b80639a19a953116100f05780639a19a95314610516578063a0709e1914610546578063a53b1c1e14610576578063b7d5df31146105a6576101e1565b80637fcaf66614610486578063822cba69146104b657806386114cea146104e6576101e1565b806322722302116101835780635119655d1161015d5780635119655d146103c65780635be6b37e146103f65780636aa482fc146104265780637173b69514610456576101e1565b806322722302146103365780632766a755146103665780634d5ee6da14610396576101e1565b806316c105e2116101bf57806316c105e2146102765780631774e646146102a65780631c9352e2146102d65780631e26fd3314610306576101e1565b80630477988a146101e6578063118a971814610216578063151f547114610246575b600080fd5b61020060048036036101fb9190810190611599565b610726565b60405161020d9190611f01565b60405180910390f35b610230600480360361022b919081019061118d565b61072d565b60405161023d9190611ca6565b60405180910390f35b610260600480360361025b9190810190611123565b61073a565b60405161026d9190611c69565b60405180910390f35b610290600480360361028b9190810190611238565b610747565b60405161029d9190611d05565b60405180910390f35b6102c060048036036102bb919081019061163d565b61074e565b6040516102cd9190611f6d565b60405180910390f35b6102f060048036036102eb91908101906115eb565b610755565b6040516102fd9190611f37565b60405180910390f35b610320600480360361031b91908101906113cf565b61075c565b60405161032d9190611de5565b60405180910390f35b610350600480360361034b91908101906112a2565b610763565b60405161035d9190611d42565b60405180910390f35b61038060048036036103
7b9190810190611365565b61076a565b60405161038d9190611da8565b60405180910390f35b6103b060048036036103ab91908101906111b6565b610777565b6040516103bd9190611cc1565b60405180910390f35b6103e060048036036103db91908101906111f7565b61077e565b6040516103ed9190611ce3565b60405180910390f35b610410600480360361040b919081019061114c565b61078b565b60405161041d9190611c84565b60405180910390f35b610440600480360361043b9190810190611279565b610792565b60405161044d9190611d27565b60405180910390f35b610470600480360361046b91908101906112e3565b61079f565b60405161047d9190611d64565b60405180910390f35b6104a0600480360361049b9190810190611558565b6107ac565b6040516104ad9190611edf565b60405180910390f35b6104d060048036036104cb9190810190611614565b6107b3565b6040516104dd9190611f52565b60405180910390f35b61050060048036036104fb919081019061148b565b6107ba565b60405161050d9190611e58565b60405180910390f35b610530600480360361052b919081019061152f565b6107c1565b60405161053d9190611ec4565b60405180910390f35b610560600480360361055b919081019061138e565b6107c8565b60405161056d9190611dc3565b60405180910390f35b610590600480360361058b91908101906114b4565b6107cf565b60405161059d9190611e73565b60405180910390f35b6105c060048036036105bb91908101906114dd565b6107d6565b6040516105cd9190611e8e565b60405180910390f35b6105f060048036036105eb9190810190611421565b6107dd565b6040516105fd9190611e1b565b60405180910390f35b610620600480360361061b9190810190611324565b6107e4565b60405161062d9190611d86565b60405180910390f35b610650600480360361064b91908101906115c2565b6107eb565b60405161065d9190611f1c565b60405180910390f35b610680600480360361067b919081019061144a565b6107f2565b60405161068d9190611e36565b60405180910390f35b6106b060048036036106ab91908101906110fa565b6107f9565b6040516106bd9190611c4e565b60405180910390f35b6106e060048036036106db9190810190611506565b610800565b6040516106ed9190611ea9565b60405180910390f35b610710600480360361070b91908101906113f8565b610807565b60405161071d9190611e00565b60405180910390f35b6000919050565b61073561080e565b919050565b610742610830565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b610772610852565b919050565b6060919050565b610786610874565b919050565b6060919050565b61079a61089b565b919050565b6107a76108bd565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108835790505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108cc5790505090565b60006108f082356124f2565b905092915050565b600082601f830112151561090b57600080fd5b600261091e61091982611fb5565b611f88565b9150818385602084028201111561093457600080fd5b60005b83811015610964578161094a88826108e4565b845260208401935060208301925050600181019050610937565b5050505092915050565b600082601f830112151561098157600080fd5b813561099461098f82611fd7565b611f88565b915081818352602084019350602081019050838560208402820111156109b957600080fd5b60005b838110156109e957816109cf88826108e4565b8452602084019350602083019250506001810190506109bc565b5050505092915050565b600082601f8301121515610a0657600080fd5b6002610a19610a1482611fff565b611f88565b91508183856020840282011115610a2f57600080fd5b60005b83811015610a5f5781610a458882610e9e565b845260208401935060208301925050600181019050610a32565b50505
05092915050565b600082601f8301121515610a7c57600080fd5b8135610a8f610a8a82612021565b611f88565b91508181835260208401935060208101905083856020840282011115610ab457600080fd5b60005b83811015610ae45781610aca8882610e9e565b845260208401935060208301925050600181019050610ab7565b5050505092915050565b600082601f8301121515610b0157600080fd5b6002610b14610b0f82612049565b611f88565b9150818360005b83811015610b4b5781358601610b318882610eda565b845260208401935060208301925050600181019050610b1b565b5050505092915050565b600082601f8301121515610b6857600080fd5b8135610b7b610b768261206b565b611f88565b9150818183526020840193506020810190508360005b83811015610bc15781358601610ba78882610eda565b845260208401935060208301925050600181019050610b91565b5050505092915050565b600082601f8301121515610bde57600080fd5b6002610bf1610bec82612093565b611f88565b91508183856020840282011115610c0757600080fd5b60005b83811015610c375781610c1d8882610f9a565b845260208401935060208301925050600181019050610c0a565b5050505092915050565b600082601f8301121515610c5457600080fd5b8135610c67610c62826120b5565b611f88565b91508181835260208401935060208101905083856020840282011115610c8c57600080fd5b60005b83811015610cbc5781610ca28882610f9a565b845260208401935060208301925050600181019050610c8f565b5050505092915050565b600082601f8301121515610cd957600080fd5b6002610cec610ce7826120dd565b611f88565b9150818360005b83811015610d235781358601610d098882610fea565b845260208401935060208301925050600181019050610cf3565b5050505092915050565b600082601f8301121515610d4057600080fd5b8135610d53610d4e826120ff565b611f88565b9150818183526020840193506020810190508360005b83811015610d995781358601610d7f8882610fea565b845260208401935060208301925050600181019050610d69565b5050505092915050565b600082601f8301121515610db657600080fd5b6002610dc9610dc482612127565b611f88565b91508183856020840282011115610ddf57600080fd5b60005b83811015610e0f5781610df588826110aa565b845260208401935060208301925050600181019050610de2565b5050505092915050565b600082601f8301121515610e2c57600080fd5b8135610e3f610e3a82612149565b611f88565b91508181835260208401935060208101905083856020840282011115610e6457600080fd5b60005b83811015610e945781610e7a88826110aa565b845260208401935060208301925050600181019050610e67565b5050505092915050565b6000610eaa8235612504565b905092915050565b6000610ebe8235612510565b905092915050565b6000610ed2823561253c565b905092915050565b600082601f8301121515610eed57600080fd5b8135610f00610efb82612171565b611f88565b91508082526020830160208301858383011115610f1c57600080fd5b610f278382846125cd565b50505092915050565b600082601f8301121515610f4357600080fd5b8135610f56610f518261219d565b611f88565b91508082526020830160208301858383011115610f7257600080fd5b610f7d8382846125cd565b50505092915050565b6000610f928235612546565b905092915050565b6000610fa68235612553565b905092915050565b6000610fba823561255d565b905092915050565b6000610fce823561256a565b905092915050565b6000610fe28235612577565b905092915050565b600082601f8301121515610ffd57600080fd5b813561101061100b826121c9565b611f88565b9150808252602083016020830185838301111561102c57600080fd5b6110378382846125cd565b50505092915050565b600082601f830112151561105357600080fd5b8135611066611061826121f5565b611f88565b9150808252602083016020830185838301111561108257600080fd5b61108d8382846125cd565b50505092915050565b60006110a28235612584565b905092915050565b60006110b68235612592565b905092915050565b60006110ca823561259c565b905092915050565b60006110de82356125ac565b905092915050565b60006110f282356125c0565b905092915050565b60006020828403121561110c57600080fd5b600061111a848285016108e4565b91505092915050565b60006040828403121561113557600080fd5b6000611143848285016108f8565b91505092915050565b60006020828403
121561115e57600080fd5b600082013567ffffffffffffffff81111561117857600080fd5b6111848482850161096e565b91505092915050565b60006040828403121561119f57600080fd5b60006111ad848285016109f3565b91505092915050565b6000602082840312156111c857600080fd5b600082013567ffffffffffffffff8111156111e257600080fd5b6111ee84828501610a69565b91505092915050565b60006020828403121561120957600080fd5b600082013567ffffffffffffffff81111561122357600080fd5b61122f84828501610aee565b91505092915050565b60006020828403121561124a57600080fd5b600082013567ffffffffffffffff81111561126457600080fd5b61127084828501610b55565b91505092915050565b60006040828403121561128b57600080fd5b600061129984828501610bcb565b91505092915050565b6000602082840312156112b457600080fd5b600082013567ffffffffffffffff8111156112ce57600080fd5b6112da84828501610c41565b91505092915050565b6000602082840312156112f557600080fd5b600082013567ffffffffffffffff81111561130f57600080fd5b61131b84828501610cc6565b91505092915050565b60006020828403121561133657600080fd5b600082013567ffffffffffffffff81111561135057600080fd5b61135c84828501610d2d565b91505092915050565b60006040828403121561137757600080fd5b600061138584828501610da3565b91505092915050565b6000602082840312156113a057600080fd5b600082013567ffffffffffffffff8111156113ba57600080fd5b6113c684828501610e19565b91505092915050565b6000602082840312156113e157600080fd5b60006113ef84828501610e9e565b91505092915050565b60006020828403121561140a57600080fd5b600061141884828501610eb2565b91505092915050565b60006020828403121561143357600080fd5b600061144184828501610ec6565b91505092915050565b60006020828403121561145c57600080fd5b600082013567ffffffffffffffff81111561147657600080fd5b61148284828501610f30565b91505092915050565b60006020828403121561149d57600080fd5b60006114ab84828501610f86565b91505092915050565b6000602082840312156114c657600080fd5b60006114d484828501610f9a565b91505092915050565b6000602082840312156114ef57600080fd5b60006114fd84828501610fae565b91505092915050565b60006020828403121561151857600080fd5b600061152684828501610fc2565b91505092915050565b60006020828403121561154157600080fd5b600061154f84828501610fd6565b91505092915050565b60006020828403121561156a57600080fd5b600082013567ffffffffffffffff81111561158457600080fd5b61159084828501611040565b91505092915050565b6000602082840312156115ab57600080fd5b60006115b984828501611096565b91505092915050565b6000602082840312156115d457600080fd5b60006115e2848285016110aa565b91505092915050565b6000602082840312156115fd57600080fd5b600061160b848285016110be565b91505092915050565b60006020828403121561162657600080fd5b6000611634848285016110d2565b91505092915050565b60006020828403121561164f57600080fd5b600061165d848285016110e6565b91505092915050565b61166f816123f7565b82525050565b61167e816122ab565b61168782612221565b60005b828110156116b95761169d858351611666565b6116a68261235b565b915060208501945060018101905061168a565b5050505050565b60006116cb826122b6565b8084526020840193506116dd8361222b565b60005b8281101561170f576116f3868351611666565b6116fc82612368565b91506020860195506001810190506116e0565b50849250505092915050565b611724816122c1565b61172d82612238565b60005b8281101561175f57611743858351611ab3565b61174c82612375565b9150602085019450600181019050611730565b5050505050565b6000611771826122cc565b80845260208401935061178383612242565b60005b828110156117b557611799868351611ab3565b6117a282612382565b9150602086019550600181019050611786565b50849250505092915050565b60006117cc826122d7565b836020820285016117dc8561224f565b60005b848110156118155783830388526117f7838351611b16565b92506118028261238f565b91506020880197506001810190506117df565b508196508694505050505092915050565b6000611831826122e2565b8084526020840193508360208202850161184a8
5612259565b60005b84811015611883578383038852611865838351611b16565b92506118708261239c565b915060208801975060018101905061184d565b508196508694505050505092915050565b61189d816122ed565b6118a682612266565b60005b828110156118d8576118bc858351611b5b565b6118c5826123a9565b91506020850194506001810190506118a9565b5050505050565b60006118ea826122f8565b8084526020840193506118fc83612270565b60005b8281101561192e57611912868351611b5b565b61191b826123b6565b91506020860195506001810190506118ff565b50849250505092915050565b600061194582612303565b836020820285016119558561227d565b60005b8481101561198e578383038852611970838351611bcd565b925061197b826123c3565b9150602088019750600181019050611958565b508196508694505050505092915050565b60006119aa8261230e565b808452602084019350836020820285016119c385612287565b60005b848110156119fc5783830388526119de838351611bcd565b92506119e9826123d0565b91506020880197506001810190506119c6565b508196508694505050505092915050565b611a1681612319565b611a1f82612294565b60005b82811015611a5157611a35858351611c12565b611a3e826123dd565b9150602085019450600181019050611a22565b5050505050565b6000611a6382612324565b808452602084019350611a758361229e565b60005b82811015611aa757611a8b868351611c12565b611a94826123ea565b9150602086019550600181019050611a78565b50849250505092915050565b611abc81612409565b82525050565b611acb81612415565b82525050565b611ada81612441565b82525050565b6000611aeb8261233a565b808452611aff8160208601602086016125dc565b611b088161260f565b602085010191505092915050565b6000611b218261232f565b808452611b358160208601602086016125dc565b611b3e8161260f565b602085010191505092915050565b611b558161244b565b82525050565b611b6481612458565b82525050565b611b7381612462565b82525050565b611b828161246f565b82525050565b611b918161247c565b82525050565b6000611ba282612350565b808452611bb68160208601602086016125dc565b611bbf8161260f565b602085010191505092915050565b6000611bd882612345565b808452611bec8160208601602086016125dc565b611bf58161260f565b602085010191505092915050565b611c0c81612489565b82525050565b611c1b816124b7565b82525050565b611c2a816124c1565b82525050565b611c39816124d1565b82525050565b611c48816124e5565b82525050565b6000602082019050611c636000830184611666565b92915050565b6000604082019050611c7e6000830184611675565b92915050565b60006020820190508181036000830152611c9e81846116c0565b905092915050565b6000604082019050611cbb600083018461171b565b92915050565b60006020820190508181036000830152611cdb8184611766565b905092915050565b60006020820190508181036000830152611cfd81846117c1565b905092915050565b60006020820190508181036000830152611d1f8184611826565b905092915050565b6000604082019050611d3c6000830184611894565b92915050565b60006020820190508181036000830152611d5c81846118df565b905092915050565b60006020820190508181036000830152611d7e818461193a565b905092915050565b60006020820190508181036000830152611da0818461199f565b905092915050565b6000604082019050611dbd6000830184611a0d565b92915050565b60006020820190508181036000830152611ddd8184611a58565b905092915050565b6000602082019050611dfa6000830184611ab3565b92915050565b6000602082019050611e156000830184611ac2565b92915050565b6000602082019050611e306000830184611ad1565b92915050565b60006020820190508181036000830152611e508184611ae0565b905092915050565b6000602082019050611e6d6000830184611b4c565b92915050565b6000602082019050611e886000830184611b5b565b92915050565b6000602082019050611ea36000830184611b6a565b92915050565b6000602082019050611ebe6000830184611b79565b92915050565b6000602082019050611ed96000830184611b88565b92915050565b60006020820190508181036000830152611ef98184611b97565b905092915050565b6000602082019050611f166000830184611c03565b92915050565b6000602082019050611f316000830184611c12565b9291505056
5b6000602082019050611f4c6000830184611c21565b92915050565b6000602082019050611f676000830184611c30565b92915050565b6000602082019050611f826000830184611c3f565b92915050565b6000604051905081810181811067ffffffffffffffff82111715611fab57600080fd5b8060405250919050565b600067ffffffffffffffff821115611fcc57600080fd5b602082029050919050565b600067ffffffffffffffff821115611fee57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561201657600080fd5b602082029050919050565b600067ffffffffffffffff82111561203857600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561206057600080fd5b602082029050919050565b600067ffffffffffffffff82111561208257600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120aa57600080fd5b602082029050919050565b600067ffffffffffffffff8211156120cc57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120f457600080fd5b602082029050919050565b600067ffffffffffffffff82111561211657600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561213e57600080fd5b602082029050919050565b600067ffffffffffffffff82111561216057600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561218857600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121b457600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121e057600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff82111561220c57600080fd5b601f19601f8301169050602081019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b600061240282612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b60006124fd82612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b838110156125fa5780820151818401526020810190506125df565b83811115612609576000848401525b50505050565b6000601f19601f830116905091905056fea265627a7a72305
8206fe37171cf1b10ebd291cfdca61d67e7fc3c208795e999c833c42a14d86cf00d6c6578706572696d656e74616cf50037`, - ` -// This file is an automatically generated Java binding. Do not modify as any -// change will likely be lost upon the next re-generation! - -package bindtest; - -import org.ethereum.geth.*; -import java.util.*; - -public class Test { - // ABI is the input ABI used to generate the binding from. - public final static String ABI = "[{\"constant\":false,\"inputs\":[{\"name\":\"u16\",\"type\":\"uint16\"}],\"name\":\"setUint16\",\"outputs\":[{\"name\":\"\",\"type\":\"uint16\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b_a\",\"type\":\"bool[2]\"}],\"name\":\"setBoolArray\",\"outputs\":[{\"name\":\"\",\"type\":\"bool[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a_a\",\"type\":\"address[2]\"}],\"name\":\"setAddressArray\",\"outputs\":[{\"name\":\"\",\"type\":\"address[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs_l\",\"type\":\"bytes[]\"}],\"name\":\"setBytesList\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u8\",\"type\":\"uint8\"}],\"name\":\"setUint8\",\"outputs\":[{\"name\":\"\",\"type\":\"uint8\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u32\",\"type\":\"uint32\"}],\"name\":\"setUint32\",\"outputs\":[{\"name\":\"\",\"type\":\"uint32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b\",\"type\":\"bool\"}],\"name\":\"setBool\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256_l\",\"type\":\"int256[]\"}],\"name\":\"setInt256List\",\"outputs\":[{\"name\":\"\",\"type\":\"int256[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256_a\",\"type\":\"uint256[2]\"}],\"name\":\"setUint256Array\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b_l\",\"type\":\"bool[]\"}],\"name\":\"setBoolList\",\"outputs\":[{\"name\":\"\",\"type\":\"bool[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs_a\",\"type\":\"bytes[2]\"}],\"name\":\"setBytesArray\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a_l\",\"type\":\"address[]\"}],\"name\":\"setAddressList\",\"outputs\":[{\"name\":\"\",\"type\":\"address[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256_a\",\"type\":\"int256[2]\"}],\"name\":\"setInt256Array\",\"outputs\":[{\"name\":\"\",\"type\":\"int256[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s_a\",\"type\":\"string[2]\"}],\"name\":\"setStringArray\",\"outputs\":[{\"name\":\"\",\"type\":\"string[2]\"}],\"payable\
":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s\",\"type\":\"string\"}],\"name\":\"setString\",\"outputs\":[{\"name\":\"\",\"type\":\"string\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u64\",\"type\":\"uint64\"}],\"name\":\"setUint64\",\"outputs\":[{\"name\":\"\",\"type\":\"uint64\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i16\",\"type\":\"int16\"}],\"name\":\"setInt16\",\"outputs\":[{\"name\":\"\",\"type\":\"int16\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i8\",\"type\":\"int8\"}],\"name\":\"setInt8\",\"outputs\":[{\"name\":\"\",\"type\":\"int8\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256_l\",\"type\":\"uint256[]\"}],\"name\":\"setUint256List\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256\",\"type\":\"int256\"}],\"name\":\"setInt256\",\"outputs\":[{\"name\":\"\",\"type\":\"int256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i32\",\"type\":\"int32\"}],\"name\":\"setInt32\",\"outputs\":[{\"name\":\"\",\"type\":\"int32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b32\",\"type\":\"bytes32\"}],\"name\":\"setBytes32\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s_l\",\"type\":\"string[]\"}],\"name\":\"setStringList\",\"outputs\":[{\"name\":\"\",\"type\":\"string[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256\",\"type\":\"uint256\"}],\"name\":\"setUint256\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs\",\"type\":\"bytes\"}],\"name\":\"setBytes\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a\",\"type\":\"address\"}],\"name\":\"setAddress\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i64\",\"type\":\"int64\"}],\"name\":\"setInt64\",\"outputs\":[{\"name\":\"\",\"type\":\"int64\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b1\",\"type\":\"bytes1\"}],\"name\":\"setBytes1\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes1\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"; - - // BYTECODE is the compiled bytecode used for deploying new contracts. 
- public final static String BYTECODE = "0x608060405234801561001057600080fd5b5061265a806100206000396000f3fe608060405234801561001057600080fd5b50600436106101e1576000357c0100000000000000000000000000000000000000000000000000000000900480637fcaf66611610116578063c2b12a73116100b4578063da359dc81161008e578063da359dc814610666578063e30081a014610696578063e673eb32146106c6578063fba1a1c3146106f6576101e1565b8063c2b12a73146105d6578063c577796114610606578063d2282dc514610636576101e1565b80639a19a953116100f05780639a19a95314610516578063a0709e1914610546578063a53b1c1e14610576578063b7d5df31146105a6576101e1565b80637fcaf66614610486578063822cba69146104b657806386114cea146104e6576101e1565b806322722302116101835780635119655d1161015d5780635119655d146103c65780635be6b37e146103f65780636aa482fc146104265780637173b69514610456576101e1565b806322722302146103365780632766a755146103665780634d5ee6da14610396576101e1565b806316c105e2116101bf57806316c105e2146102765780631774e646146102a65780631c9352e2146102d65780631e26fd3314610306576101e1565b80630477988a146101e6578063118a971814610216578063151f547114610246575b600080fd5b61020060048036036101fb9190810190611599565b610726565b60405161020d9190611f01565b60405180910390f35b610230600480360361022b919081019061118d565b61072d565b60405161023d9190611ca6565b60405180910390f35b610260600480360361025b9190810190611123565b61073a565b60405161026d9190611c69565b60405180910390f35b610290600480360361028b9190810190611238565b610747565b60405161029d9190611d05565b60405180910390f35b6102c060048036036102bb919081019061163d565b61074e565b6040516102cd9190611f6d565b60405180910390f35b6102f060048036036102eb91908101906115eb565b610755565b6040516102fd9190611f37565b60405180910390f35b610320600480360361031b91908101906113cf565b61075c565b60405161032d9190611de5565b60405180910390f35b610350600480360361034b91908101906112a2565b610763565b60405161035d9190611d42565b60405180910390f35b610380600480360361037b9190810190611365565b61076a565b60405161038d9190611da8565b60405180910390f35b6103b060048036036103ab91908101906111b6565b610777565b6040516103bd9190611cc1565b60405180910390f35b6103e060048036036103db91908101906111f7565b61077e565b6040516103ed9190611ce3565b60405180910390f35b610410600480360361040b919081019061114c565b61078b565b60405161041d9190611c84565b60405180910390f35b610440600480360361043b9190810190611279565b610792565b60405161044d9190611d27565b60405180910390f35b610470600480360361046b91908101906112e3565b61079f565b60405161047d9190611d64565b60405180910390f35b6104a0600480360361049b9190810190611558565b6107ac565b6040516104ad9190611edf565b60405180910390f35b6104d060048036036104cb9190810190611614565b6107b3565b6040516104dd9190611f52565b60405180910390f35b61050060048036036104fb919081019061148b565b6107ba565b60405161050d9190611e58565b60405180910390f35b610530600480360361052b919081019061152f565b6107c1565b60405161053d9190611ec4565b60405180910390f35b610560600480360361055b919081019061138e565b6107c8565b60405161056d9190611dc3565b60405180910390f35b610590600480360361058b91908101906114b4565b6107cf565b60405161059d9190611e73565b60405180910390f35b6105c060048036036105bb91908101906114dd565b6107d6565b6040516105cd9190611e8e565b60405180910390f35b6105f060048036036105eb9190810190611421565b6107dd565b6040516105fd9190611e1b565b60405180910390f35b610620600480360361061b9190810190611324565b6107e4565b60405161062d9190611d86565b60405180910390f35b610650600480360361064b91908101906115c2565b6107eb565b60405161065d9190611f1c565b60405180910390f35b610680600480360361067b919081019061144a565b6107f2565b60405161068d9190611e36565b60405180910390f35b6106b060048036036106ab91908101906110fa565b6107f9565b6040516106bd9190611c4e
565b60405180910390f35b6106e060048036036106db9190810190611506565b610800565b6040516106ed9190611ea9565b60405180910390f35b610710600480360361070b91908101906113f8565b610807565b60405161071d9190611e00565b60405180910390f35b6000919050565b61073561080e565b919050565b610742610830565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b610772610852565b919050565b6060919050565b610786610874565b919050565b6060919050565b61079a61089b565b919050565b6107a76108bd565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108835790505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108cc5790505090565b60006108f082356124f2565b905092915050565b600082601f830112151561090b57600080fd5b600261091e61091982611fb5565b611f88565b9150818385602084028201111561093457600080fd5b60005b83811015610964578161094a88826108e4565b845260208401935060208301925050600181019050610937565b5050505092915050565b600082601f830112151561098157600080fd5b813561099461098f82611fd7565b611f88565b915081818352602084019350602081019050838560208402820111156109b957600080fd5b60005b838110156109e957816109cf88826108e4565b8452602084019350602083019250506001810190506109bc565b5050505092915050565b600082601f8301121515610a0657600080fd5b6002610a19610a1482611fff565b611f88565b91508183856020840282011115610a2f57600080fd5b60005b83811015610a5f5781610a458882610e9e565b845260208401935060208301925050600181019050610a32565b5050505092915050565b600082601f8301121515610a7c57600080fd5b8135610a8f610a8a82612021565b611f88565b91508181835260208401935060208101905083856020840282011115610ab457600080fd5b60005b83811015610ae45781610aca8882610e9e565b845260208401935060208301925050600181019050610ab7565b5050505092915050565b600082601f8301121515610b0157600080fd5b6002610b14610b0f82612049565b611f88565b9150818360005b83811015610b4b5781358601610b318882610eda565b845260208401935060208301925050600181019050610b1b565b5050505092915050565b600082601f8301121515610b6857600080fd5b8135610b7b610b768261206b565b611f88565b9150818183526020840193506020810190508360005b83811015610bc15781358601610ba78882610eda565b845260208401935060208301925050600181019050610b91565b5050505092915050565b600082601f8301121515610bde57600080fd5b6002610bf1610bec82612093565b611f88565b91508183856020840282011115610c0757600080fd5b60005b83811015610c375781610c1d8882610f9a565b845260208401935060208301925050600181019050610c0a565b5050505092915050565b600082601f8301121515610c5457600080fd5b8135610c67610c62826120b5565b611f88565b91508181835260208401935060208101905083856020840282011115610c8c57600080fd5b60005b83811015610cbc5781610ca28882610f9a565b845260208401935060208301925050600181019050610c8f565b5050505092915050565b600082601f8301121515610cd957600080fd5b6002610cec610ce7826120dd565b611f88565b9150818360005b83811015610d235781358601610d098882610fea565b845260208401935060208301925050600181019050610cf3565b5050505092915050565b600082601f8301121515610d4057600080fd5b8135610d53610d4e826120ff565b611f88565b9150818183526020840193506020810190508360005b83811015610d995781358601610d7f8882610fea565b845260208401935060208301925050600181019050610d69565b5050505092915050565b600082601f830112151
5610db657600080fd5b6002610dc9610dc482612127565b611f88565b91508183856020840282011115610ddf57600080fd5b60005b83811015610e0f5781610df588826110aa565b845260208401935060208301925050600181019050610de2565b5050505092915050565b600082601f8301121515610e2c57600080fd5b8135610e3f610e3a82612149565b611f88565b91508181835260208401935060208101905083856020840282011115610e6457600080fd5b60005b83811015610e945781610e7a88826110aa565b845260208401935060208301925050600181019050610e67565b5050505092915050565b6000610eaa8235612504565b905092915050565b6000610ebe8235612510565b905092915050565b6000610ed2823561253c565b905092915050565b600082601f8301121515610eed57600080fd5b8135610f00610efb82612171565b611f88565b91508082526020830160208301858383011115610f1c57600080fd5b610f278382846125cd565b50505092915050565b600082601f8301121515610f4357600080fd5b8135610f56610f518261219d565b611f88565b91508082526020830160208301858383011115610f7257600080fd5b610f7d8382846125cd565b50505092915050565b6000610f928235612546565b905092915050565b6000610fa68235612553565b905092915050565b6000610fba823561255d565b905092915050565b6000610fce823561256a565b905092915050565b6000610fe28235612577565b905092915050565b600082601f8301121515610ffd57600080fd5b813561101061100b826121c9565b611f88565b9150808252602083016020830185838301111561102c57600080fd5b6110378382846125cd565b50505092915050565b600082601f830112151561105357600080fd5b8135611066611061826121f5565b611f88565b9150808252602083016020830185838301111561108257600080fd5b61108d8382846125cd565b50505092915050565b60006110a28235612584565b905092915050565b60006110b68235612592565b905092915050565b60006110ca823561259c565b905092915050565b60006110de82356125ac565b905092915050565b60006110f282356125c0565b905092915050565b60006020828403121561110c57600080fd5b600061111a848285016108e4565b91505092915050565b60006040828403121561113557600080fd5b6000611143848285016108f8565b91505092915050565b60006020828403121561115e57600080fd5b600082013567ffffffffffffffff81111561117857600080fd5b6111848482850161096e565b91505092915050565b60006040828403121561119f57600080fd5b60006111ad848285016109f3565b91505092915050565b6000602082840312156111c857600080fd5b600082013567ffffffffffffffff8111156111e257600080fd5b6111ee84828501610a69565b91505092915050565b60006020828403121561120957600080fd5b600082013567ffffffffffffffff81111561122357600080fd5b61122f84828501610aee565b91505092915050565b60006020828403121561124a57600080fd5b600082013567ffffffffffffffff81111561126457600080fd5b61127084828501610b55565b91505092915050565b60006040828403121561128b57600080fd5b600061129984828501610bcb565b91505092915050565b6000602082840312156112b457600080fd5b600082013567ffffffffffffffff8111156112ce57600080fd5b6112da84828501610c41565b91505092915050565b6000602082840312156112f557600080fd5b600082013567ffffffffffffffff81111561130f57600080fd5b61131b84828501610cc6565b91505092915050565b60006020828403121561133657600080fd5b600082013567ffffffffffffffff81111561135057600080fd5b61135c84828501610d2d565b91505092915050565b60006040828403121561137757600080fd5b600061138584828501610da3565b91505092915050565b6000602082840312156113a057600080fd5b600082013567ffffffffffffffff8111156113ba57600080fd5b6113c684828501610e19565b91505092915050565b6000602082840312156113e157600080fd5b60006113ef84828501610e9e565b91505092915050565b60006020828403121561140a57600080fd5b600061141884828501610eb2565b91505092915050565b60006020828403121561143357600080fd5b600061144184828501610ec6565b91505092915050565b60006020828403121561145c57600080fd5b600082013567ffffffffffffffff81111561147657600080fd5b61148284828501610f30565b91505092915050565b60006020828403121561149d57600080fd5b60
006114ab84828501610f86565b91505092915050565b6000602082840312156114c657600080fd5b60006114d484828501610f9a565b91505092915050565b6000602082840312156114ef57600080fd5b60006114fd84828501610fae565b91505092915050565b60006020828403121561151857600080fd5b600061152684828501610fc2565b91505092915050565b60006020828403121561154157600080fd5b600061154f84828501610fd6565b91505092915050565b60006020828403121561156a57600080fd5b600082013567ffffffffffffffff81111561158457600080fd5b61159084828501611040565b91505092915050565b6000602082840312156115ab57600080fd5b60006115b984828501611096565b91505092915050565b6000602082840312156115d457600080fd5b60006115e2848285016110aa565b91505092915050565b6000602082840312156115fd57600080fd5b600061160b848285016110be565b91505092915050565b60006020828403121561162657600080fd5b6000611634848285016110d2565b91505092915050565b60006020828403121561164f57600080fd5b600061165d848285016110e6565b91505092915050565b61166f816123f7565b82525050565b61167e816122ab565b61168782612221565b60005b828110156116b95761169d858351611666565b6116a68261235b565b915060208501945060018101905061168a565b5050505050565b60006116cb826122b6565b8084526020840193506116dd8361222b565b60005b8281101561170f576116f3868351611666565b6116fc82612368565b91506020860195506001810190506116e0565b50849250505092915050565b611724816122c1565b61172d82612238565b60005b8281101561175f57611743858351611ab3565b61174c82612375565b9150602085019450600181019050611730565b5050505050565b6000611771826122cc565b80845260208401935061178383612242565b60005b828110156117b557611799868351611ab3565b6117a282612382565b9150602086019550600181019050611786565b50849250505092915050565b60006117cc826122d7565b836020820285016117dc8561224f565b60005b848110156118155783830388526117f7838351611b16565b92506118028261238f565b91506020880197506001810190506117df565b508196508694505050505092915050565b6000611831826122e2565b8084526020840193508360208202850161184a85612259565b60005b84811015611883578383038852611865838351611b16565b92506118708261239c565b915060208801975060018101905061184d565b508196508694505050505092915050565b61189d816122ed565b6118a682612266565b60005b828110156118d8576118bc858351611b5b565b6118c5826123a9565b91506020850194506001810190506118a9565b5050505050565b60006118ea826122f8565b8084526020840193506118fc83612270565b60005b8281101561192e57611912868351611b5b565b61191b826123b6565b91506020860195506001810190506118ff565b50849250505092915050565b600061194582612303565b836020820285016119558561227d565b60005b8481101561198e578383038852611970838351611bcd565b925061197b826123c3565b9150602088019750600181019050611958565b508196508694505050505092915050565b60006119aa8261230e565b808452602084019350836020820285016119c385612287565b60005b848110156119fc5783830388526119de838351611bcd565b92506119e9826123d0565b91506020880197506001810190506119c6565b508196508694505050505092915050565b611a1681612319565b611a1f82612294565b60005b82811015611a5157611a35858351611c12565b611a3e826123dd565b9150602085019450600181019050611a22565b5050505050565b6000611a6382612324565b808452602084019350611a758361229e565b60005b82811015611aa757611a8b868351611c12565b611a94826123ea565b9150602086019550600181019050611a78565b50849250505092915050565b611abc81612409565b82525050565b611acb81612415565b82525050565b611ada81612441565b82525050565b6000611aeb8261233a565b808452611aff8160208601602086016125dc565b611b088161260f565b602085010191505092915050565b6000611b218261232f565b808452611b358160208601602086016125dc565b611b3e8161260f565b602085010191505092915050565b611b558161244b565b82525050565b611b6481612458565b82525050565b611b7381612462565b82525050565b611b828161246f565b82525050565b611b918161247c5
65b82525050565b6000611ba282612350565b808452611bb68160208601602086016125dc565b611bbf8161260f565b602085010191505092915050565b6000611bd882612345565b808452611bec8160208601602086016125dc565b611bf58161260f565b602085010191505092915050565b611c0c81612489565b82525050565b611c1b816124b7565b82525050565b611c2a816124c1565b82525050565b611c39816124d1565b82525050565b611c48816124e5565b82525050565b6000602082019050611c636000830184611666565b92915050565b6000604082019050611c7e6000830184611675565b92915050565b60006020820190508181036000830152611c9e81846116c0565b905092915050565b6000604082019050611cbb600083018461171b565b92915050565b60006020820190508181036000830152611cdb8184611766565b905092915050565b60006020820190508181036000830152611cfd81846117c1565b905092915050565b60006020820190508181036000830152611d1f8184611826565b905092915050565b6000604082019050611d3c6000830184611894565b92915050565b60006020820190508181036000830152611d5c81846118df565b905092915050565b60006020820190508181036000830152611d7e818461193a565b905092915050565b60006020820190508181036000830152611da0818461199f565b905092915050565b6000604082019050611dbd6000830184611a0d565b92915050565b60006020820190508181036000830152611ddd8184611a58565b905092915050565b6000602082019050611dfa6000830184611ab3565b92915050565b6000602082019050611e156000830184611ac2565b92915050565b6000602082019050611e306000830184611ad1565b92915050565b60006020820190508181036000830152611e508184611ae0565b905092915050565b6000602082019050611e6d6000830184611b4c565b92915050565b6000602082019050611e886000830184611b5b565b92915050565b6000602082019050611ea36000830184611b6a565b92915050565b6000602082019050611ebe6000830184611b79565b92915050565b6000602082019050611ed96000830184611b88565b92915050565b60006020820190508181036000830152611ef98184611b97565b905092915050565b6000602082019050611f166000830184611c03565b92915050565b6000602082019050611f316000830184611c12565b92915050565b6000602082019050611f4c6000830184611c21565b92915050565b6000602082019050611f676000830184611c30565b92915050565b6000602082019050611f826000830184611c3f565b92915050565b6000604051905081810181811067ffffffffffffffff82111715611fab57600080fd5b8060405250919050565b600067ffffffffffffffff821115611fcc57600080fd5b602082029050919050565b600067ffffffffffffffff821115611fee57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561201657600080fd5b602082029050919050565b600067ffffffffffffffff82111561203857600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561206057600080fd5b602082029050919050565b600067ffffffffffffffff82111561208257600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120aa57600080fd5b602082029050919050565b600067ffffffffffffffff8211156120cc57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120f457600080fd5b602082029050919050565b600067ffffffffffffffff82111561211657600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561213e57600080fd5b602082029050919050565b600067ffffffffffffffff82111561216057600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561218857600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121b457600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121e057600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff82111561220c57600080fd5b601f19601f8301169050602081019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b60008190
50919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b600061240282612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b60006124fd82612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b838110156125fa5780820151818401526020810190506125df565b83811115612609576000848401525b50505050565b6000601f19601f830116905091905056fea265627a7a723058206fe37171cf1b10ebd291cfdca61d67e7fc3c208795e999c833c42a14d86cf00d6c6578706572696d656e74616cf50037"; - - // deploy deploys a new Ethereum contract, binding an instance of Test to it. - public static Test deploy(TransactOpts auth, EthereumClient client) throws Exception { - Interfaces args = Geth.newInterfaces(0); - String bytecode = BYTECODE; - return new Test(Geth.deployContract(auth, ABI, Geth.decodeFromHex(bytecode), client, args)); - } - - // Internal constructor used by contract deployment. - private Test(BoundContract deployment) { - this.Address = deployment.getAddress(); - this.Deployer = deployment.getDeployer(); - this.Contract = deployment; - } - - // Ethereum address where this contract is located at. - public final Address Address; - - // Ethereum transaction in which this contract was deployed (if known!). - public final Transaction Deployer; - - // Contract instance bound to a blockchain address. - private final BoundContract Contract; - - // Creates a new instance of Test, bound to a specific deployed contract. - public Test(Address address, EthereumClient client) throws Exception { - this(Geth.bindContract(address, ABI, client)); - } - - // setAddress is a paid mutator transaction binding the contract method 0xe30081a0. - // - // Solidity: function setAddress(address a) returns(address) - public Transaction setAddress(TransactOpts opts, Address a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setAddress(a);args.set(0,arg0); - - return this.Contract.transact(opts, "setAddress" , args); - } - - // setAddressArray is a paid mutator transaction binding the contract method 0x151f5471. 
- // - // Solidity: function setAddressArray(address[2] a_a) returns(address[2]) - public Transaction setAddressArray(TransactOpts opts, Addresses a_a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setAddresses(a_a);args.set(0,arg0); - - return this.Contract.transact(opts, "setAddressArray" , args); - } - - // setAddressList is a paid mutator transaction binding the contract method 0x5be6b37e. - // - // Solidity: function setAddressList(address[] a_l) returns(address[]) - public Transaction setAddressList(TransactOpts opts, Addresses a_l) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setAddresses(a_l);args.set(0,arg0); - - return this.Contract.transact(opts, "setAddressList" , args); - } - - // setBool is a paid mutator transaction binding the contract method 0x1e26fd33. - // - // Solidity: function setBool(bool b) returns(bool) - public Transaction setBool(TransactOpts opts, boolean b) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBool(b);args.set(0,arg0); - - return this.Contract.transact(opts, "setBool" , args); - } - - // setBoolArray is a paid mutator transaction binding the contract method 0x118a9718. - // - // Solidity: function setBoolArray(bool[2] b_a) returns(bool[2]) - public Transaction setBoolArray(TransactOpts opts, Bools b_a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBools(b_a);args.set(0,arg0); - - return this.Contract.transact(opts, "setBoolArray" , args); - } - - // setBoolList is a paid mutator transaction binding the contract method 0x4d5ee6da. - // - // Solidity: function setBoolList(bool[] b_l) returns(bool[]) - public Transaction setBoolList(TransactOpts opts, Bools b_l) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBools(b_l);args.set(0,arg0); - - return this.Contract.transact(opts, "setBoolList" , args); - } - - // setBytes is a paid mutator transaction binding the contract method 0xda359dc8. - // - // Solidity: function setBytes(bytes bs) returns(bytes) - public Transaction setBytes(TransactOpts opts, byte[] bs) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBinary(bs);args.set(0,arg0); - - return this.Contract.transact(opts, "setBytes" , args); - } - - // setBytes1 is a paid mutator transaction binding the contract method 0xfba1a1c3. - // - // Solidity: function setBytes1(bytes1 b1) returns(bytes1) - public Transaction setBytes1(TransactOpts opts, byte[] b1) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBinary(b1);args.set(0,arg0); - - return this.Contract.transact(opts, "setBytes1" , args); - } - - // setBytes32 is a paid mutator transaction binding the contract method 0xc2b12a73. - // - // Solidity: function setBytes32(bytes32 b32) returns(bytes32) - public Transaction setBytes32(TransactOpts opts, byte[] b32) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBinary(b32);args.set(0,arg0); - - return this.Contract.transact(opts, "setBytes32" , args); - } - - // setBytesArray is a paid mutator transaction binding the contract method 0x5119655d. 
- // - // Solidity: function setBytesArray(bytes[2] bs_a) returns(bytes[2]) - public Transaction setBytesArray(TransactOpts opts, Binaries bs_a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBinaries(bs_a);args.set(0,arg0); - - return this.Contract.transact(opts, "setBytesArray" , args); - } - - // setBytesList is a paid mutator transaction binding the contract method 0x16c105e2. - // - // Solidity: function setBytesList(bytes[] bs_l) returns(bytes[]) - public Transaction setBytesList(TransactOpts opts, Binaries bs_l) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBinaries(bs_l);args.set(0,arg0); - - return this.Contract.transact(opts, "setBytesList" , args); - } - - // setInt16 is a paid mutator transaction binding the contract method 0x86114cea. - // - // Solidity: function setInt16(int16 i16) returns(int16) - public Transaction setInt16(TransactOpts opts, short i16) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setInt16(i16);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt16" , args); - } - - // setInt256 is a paid mutator transaction binding the contract method 0xa53b1c1e. - // - // Solidity: function setInt256(int256 i256) returns(int256) - public Transaction setInt256(TransactOpts opts, BigInt i256) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBigInt(i256);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt256" , args); - } - - // setInt256Array is a paid mutator transaction binding the contract method 0x6aa482fc. - // - // Solidity: function setInt256Array(int256[2] i256_a) returns(int256[2]) - public Transaction setInt256Array(TransactOpts opts, BigInts i256_a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBigInts(i256_a);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt256Array" , args); - } - - // setInt256List is a paid mutator transaction binding the contract method 0x22722302. - // - // Solidity: function setInt256List(int256[] i256_l) returns(int256[]) - public Transaction setInt256List(TransactOpts opts, BigInts i256_l) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBigInts(i256_l);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt256List" , args); - } - - // setInt32 is a paid mutator transaction binding the contract method 0xb7d5df31. - // - // Solidity: function setInt32(int32 i32) returns(int32) - public Transaction setInt32(TransactOpts opts, int i32) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setInt32(i32);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt32" , args); - } - - // setInt64 is a paid mutator transaction binding the contract method 0xe673eb32. - // - // Solidity: function setInt64(int64 i64) returns(int64) - public Transaction setInt64(TransactOpts opts, long i64) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setInt64(i64);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt64" , args); - } - - // setInt8 is a paid mutator transaction binding the contract method 0x9a19a953. 
- // - // Solidity: function setInt8(int8 i8) returns(int8) - public Transaction setInt8(TransactOpts opts, byte i8) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setInt8(i8);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt8" , args); - } - - // setString is a paid mutator transaction binding the contract method 0x7fcaf666. - // - // Solidity: function setString(string s) returns(string) - public Transaction setString(TransactOpts opts, String s) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setString(s);args.set(0,arg0); - - return this.Contract.transact(opts, "setString" , args); - } - - // setStringArray is a paid mutator transaction binding the contract method 0x7173b695. - // - // Solidity: function setStringArray(string[2] s_a) returns(string[2]) - public Transaction setStringArray(TransactOpts opts, Strings s_a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setStrings(s_a);args.set(0,arg0); - - return this.Contract.transact(opts, "setStringArray" , args); - } - - // setStringList is a paid mutator transaction binding the contract method 0xc5777961. - // - // Solidity: function setStringList(string[] s_l) returns(string[]) - public Transaction setStringList(TransactOpts opts, Strings s_l) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setStrings(s_l);args.set(0,arg0); - - return this.Contract.transact(opts, "setStringList" , args); - } - - // setUint16 is a paid mutator transaction binding the contract method 0x0477988a. - // - // Solidity: function setUint16(uint16 u16) returns(uint16) - public Transaction setUint16(TransactOpts opts, BigInt u16) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setUint16(u16);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint16" , args); - } - - // setUint256 is a paid mutator transaction binding the contract method 0xd2282dc5. - // - // Solidity: function setUint256(uint256 u256) returns(uint256) - public Transaction setUint256(TransactOpts opts, BigInt u256) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBigInt(u256);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint256" , args); - } - - // setUint256Array is a paid mutator transaction binding the contract method 0x2766a755. - // - // Solidity: function setUint256Array(uint256[2] u256_a) returns(uint256[2]) - public Transaction setUint256Array(TransactOpts opts, BigInts u256_a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBigInts(u256_a);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint256Array" , args); - } - - // setUint256List is a paid mutator transaction binding the contract method 0xa0709e19. - // - // Solidity: function setUint256List(uint256[] u256_l) returns(uint256[]) - public Transaction setUint256List(TransactOpts opts, BigInts u256_l) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBigInts(u256_l);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint256List" , args); - } - - // setUint32 is a paid mutator transaction binding the contract method 0x1c9352e2. 
- // - // Solidity: function setUint32(uint32 u32) returns(uint32) - public Transaction setUint32(TransactOpts opts, BigInt u32) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setUint32(u32);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint32" , args); - } - - // setUint64 is a paid mutator transaction binding the contract method 0x822cba69. - // - // Solidity: function setUint64(uint64 u64) returns(uint64) - public Transaction setUint64(TransactOpts opts, BigInt u64) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setUint64(u64);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint64" , args); - } - - // setUint8 is a paid mutator transaction binding the contract method 0x1774e646. - // - // Solidity: function setUint8(uint8 u8) returns(uint8) - public Transaction setUint8(TransactOpts opts, BigInt u8) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setUint8(u8);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint8" , args); - } -} -`, - }, - } - for i, c := range cases { - binding, err := Bind([]string{c.name}, []string{c.abi}, []string{c.bytecode}, nil, "bindtest", LangJava, nil, nil) - if err != nil { - t.Fatalf("test %d: failed to generate binding: %v", i, err) - } - // Remove empty lines - removeEmptys := func(input string) string { - lines := strings.Split(input, "\n") - var index int - for _, line := range lines { - if strings.TrimSpace(line) != "" { - lines[index] = line - index += 1 - } - } - lines = lines[:index] - return strings.Join(lines, "\n") - } - binding = removeEmptys(binding) - expect := removeEmptys(c.expected) - if binding != expect { - t.Fatalf("test %d: generated binding mismatch, has %s, want %s", i, binding, c.expected) - } - } -} -*/ diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go index c0c7b59926..796512b59e 100644 --- a/accounts/abi/bind/template.go +++ b/accounts/abi/bind/template.go @@ -75,8 +75,7 @@ type tmplStruct struct { // tmplSource is language to template mapping containing all the supported // programming languages the package can generate to. var tmplSource = map[Lang]string{ - LangGo: tmplSourceGo, - LangJava: tmplSourceJava, + LangGo: tmplSourceGo, } // tmplSourceGo is the Go source template that the generated Go contract binding @@ -569,140 +568,3 @@ var ( {{end}} {{end}} ` - -// tmplSourceJava is the Java source template that the generated Java contract binding -// is based on. -const tmplSourceJava = ` -// This file is an automatically generated Java binding. Do not modify as any -// change will likely be lost upon the next re-generation! - -package {{.Package}}; - -import org.platon.geth.*; -import java.util.*; - -{{$structs := .Structs}} -{{range $contract := .Contracts}} -{{if not .Library}}public {{end}}class {{.Type}} { - // ABI is the input ABI used to generate the binding from. - public final static String ABI = "{{.InputABI}}"; - {{if $contract.FuncSigs}} - // {{.Type}}FuncSigs maps the 4-byte function signature to its string representation. - public final static Map {{.Type}}FuncSigs; - static { - Hashtable temp = new Hashtable(); - {{range $strsig, $binsig := .FuncSigs}}temp.put("{{$binsig}}", "{{$strsig}}"); - {{end}} - {{.Type}}FuncSigs = Collections.unmodifiableMap(temp); - } - {{end}} - {{if .InputBin}} - // BYTECODE is the compiled bytecode used for deploying new contracts. 
- public final static String BYTECODE = "0x{{.InputBin}}"; - - // deploy deploys a new platon contract, binding an instance of {{.Type}} to it. - public static {{.Type}} deploy(TransactOpts auth, platonClient client{{range .Constructor.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception { - Interfaces args = Geth.newInterfaces({{(len .Constructor.Inputs)}}); - String bytecode = BYTECODE; - {{if .Libraries}} - - // "link" contract to dependent libraries by deploying them first. - {{range $pattern, $name := .Libraries}} - {{capitalise $name}} {{decapitalise $name}}Inst = {{capitalise $name}}.deploy(auth, client); - bytecode = bytecode.replace("__${{$pattern}}$__", {{decapitalise $name}}Inst.Address.getHex().substring(2)); - {{end}} - {{end}} - {{range $index, $element := .Constructor.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}}); - {{end}} - return new {{.Type}}(Geth.deployContract(auth, ABI, Geth.decodeFromHex(bytecode), client, args)); - } - - // Internal constructor used by contract deployment. - private {{.Type}}(BoundContract deployment) { - this.Address = deployment.getAddress(); - this.Deployer = deployment.getDeployer(); - this.Contract = deployment; - } - {{end}} - - // platon address where this contract is located at. - public final Address Address; - - // platon transaction in which this contract was deployed (if known!). - public final Transaction Deployer; - - // Contract instance bound to a blockchain address. - private final BoundContract Contract; - - // Creates a new instance of {{.Type}}, bound to a specific deployed contract. - public {{.Type}}(Address address, platonClient client) throws Exception { - this(Geth.bindContract(address, ABI, client)); - } - - {{range .Calls}} - {{if gt (len .Normalized.Outputs) 1}} - // {{capitalise .Normalized.Name}}Results is the output of a call to {{.Normalized.Name}}. - public class {{capitalise .Normalized.Name}}Results { - {{range $index, $item := .Normalized.Outputs}}public {{bindtype .Type $structs}} {{if ne .Name ""}}{{.Name}}{{else}}Return{{$index}}{{end}}; - {{end}} - } - {{end}} - - // {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}. 
- // - // Solidity: {{.Original.String}} - public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else if eq (len .Normalized.Outputs) 0}}void{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception { - Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}}); - {{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}}); - {{end}} - - Interfaces results = Geth.newInterfaces({{(len .Normalized.Outputs)}}); - {{range $index, $item := .Normalized.Outputs}}Interface result{{$index}} = Geth.newInterface(); result{{$index}}.setDefault{{namedtype (bindtype .Type $structs) .Type}}(); results.set({{$index}}, result{{$index}}); - {{end}} - - if (opts == null) { - opts = Geth.newCallOpts(); - } - this.Contract.call(opts, results, "{{.Original.Name}}", args); - {{if gt (len .Normalized.Outputs) 1}} - {{capitalise .Normalized.Name}}Results result = new {{capitalise .Normalized.Name}}Results(); - {{range $index, $item := .Normalized.Outputs}}result.{{if ne .Name ""}}{{.Name}}{{else}}Return{{$index}}{{end}} = results.get({{$index}}).get{{namedtype (bindtype .Type $structs) .Type}}(); - {{end}} - return result; - {{else}}{{range .Normalized.Outputs}}return results.get(0).get{{namedtype (bindtype .Type $structs) .Type}}();{{end}} - {{end}} - } - {{end}} - - {{range .Transacts}} - // {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}. - // - // Solidity: {{.Original.String}} - public Transaction {{.Normalized.Name}}(TransactOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception { - Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}}); - {{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}}); - {{end}} - return this.Contract.transact(opts, "{{.Original.Name}}" , args); - } - {{end}} - - {{if .Fallback}} - // Fallback is a paid mutator transaction binding the contract fallback function. - // - // Solidity: {{.Fallback.Original.String}} - public Transaction Fallback(TransactOpts opts, byte[] calldata) throws Exception { - return this.Contract.rawTransact(opts, calldata); - } - {{end}} - - {{if .Receive}} - // Receive is a paid mutator transaction binding the contract receive function. 
- // - // Solidity: {{.Receive.Original.String}} - public Transaction Receive(TransactOpts opts) throws Exception { - return this.Contract.rawTransact(opts, null); - } - {{end}} -} -{{end}} -` diff --git a/accounts/abi/error_handing.go b/accounts/abi/error_handling.go similarity index 81% rename from accounts/abi/error_handing.go rename to accounts/abi/error_handling.go index 7add707292..c106e9ac43 100644 --- a/accounts/abi/error_handing.go +++ b/accounts/abi/error_handling.go @@ -23,7 +23,15 @@ import ( ) var ( - errBadBool = errors.New("abi: improperly encoded boolean value") + errBadBool = errors.New("abi: improperly encoded boolean value") + errBadUint8 = errors.New("abi: improperly encoded uint8 value") + errBadUint16 = errors.New("abi: improperly encoded uint16 value") + errBadUint32 = errors.New("abi: improperly encoded uint32 value") + errBadUint64 = errors.New("abi: improperly encoded uint64 value") + errBadInt8 = errors.New("abi: improperly encoded int8 value") + errBadInt16 = errors.New("abi: improperly encoded int16 value") + errBadInt32 = errors.New("abi: improperly encoded int32 value") + errBadInt64 = errors.New("abi: improperly encoded int64 value") ) // formatSliceString formats the reflection kind with the given slice size diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go index dad05610be..e55cdaed05 100644 --- a/accounts/abi/unpack.go +++ b/accounts/abi/unpack.go @@ -19,6 +19,7 @@ package abi import ( "encoding/binary" "fmt" + "math" "math/big" "reflect" @@ -33,43 +34,72 @@ var ( ) // ReadInteger reads the integer based on its kind and returns the appropriate value. -func ReadInteger(typ Type, b []byte) interface{} { +func ReadInteger(typ Type, b []byte) (interface{}, error) { + ret := new(big.Int).SetBytes(b) + if typ.T == UintTy { + u64, isu64 := ret.Uint64(), ret.IsUint64() switch typ.Size { case 8: - return b[len(b)-1] + if !isu64 || u64 > math.MaxUint8 { + return nil, errBadUint8 + } + return byte(u64), nil case 16: - return binary.BigEndian.Uint16(b[len(b)-2:]) + if !isu64 || u64 > math.MaxUint16 { + return nil, errBadUint16 + } + return uint16(u64), nil case 32: - return binary.BigEndian.Uint32(b[len(b)-4:]) + if !isu64 || u64 > math.MaxUint32 { + return nil, errBadUint32 + } + return uint32(u64), nil case 64: - return binary.BigEndian.Uint64(b[len(b)-8:]) + if !isu64 { + return nil, errBadUint64 + } + return u64, nil default: // the only case left for unsigned integer is uint256. - return new(big.Int).SetBytes(b) + return ret, nil } } + + // big.SetBytes can't tell if a number is negative or positive in itself. + // On EVM, if the returned number > max int256, it is negative. + // A number is > max int256 if the bit at position 255 is set. 
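// Editor's illustration, not part of the patch: a minimal standalone sketch of the
// sign handling described in the comment above, assuming a 32-byte big-endian word
// as returned by the EVM. The patch expresses the same conversion via MaxUint256 and
// common.Big1; subtracting 2^256 directly is equivalent. All names below (two256,
// asSignedInt256) are hypothetical.
package main

import (
	"fmt"
	"math/big"
)

// two256 is 2^256; a 256-bit word w with bit 255 set encodes the signed value w - 2^256.
var two256 = new(big.Int).Lsh(big.NewInt(1), 256)

// asSignedInt256 reinterprets a big-endian EVM word as a signed int256.
func asSignedInt256(word []byte) *big.Int {
	v := new(big.Int).SetBytes(word) // SetBytes always yields a non-negative value
	if v.Bit(255) == 1 {             // sign bit set: the EVM value is negative
		v.Sub(v, two256) // same result as the MaxUint256/common.Big1 arithmetic used in the patch
	}
	return v
}

func main() {
	minusOne := make([]byte, 32)
	for i := range minusOne {
		minusOne[i] = 0xff // 0xff...ff is -1 in two's complement
	}
	fmt.Println(asSignedInt256(minusOne)) // prints -1
}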
+ if ret.Bit(255) == 1 { + ret.Add(MaxUint256, new(big.Int).Neg(ret)) + ret.Add(ret, common.Big1) + ret.Neg(ret) + } + i64, isi64 := ret.Int64(), ret.IsInt64() switch typ.Size { case 8: - return int8(b[len(b)-1]) + if !isi64 || i64 < math.MinInt8 || i64 > math.MaxInt8 { + return nil, errBadInt8 + } + return int8(i64), nil case 16: - return int16(binary.BigEndian.Uint16(b[len(b)-2:])) + if !isi64 || i64 < math.MinInt16 || i64 > math.MaxInt16 { + return nil, errBadInt16 + } + return int16(i64), nil case 32: - return int32(binary.BigEndian.Uint32(b[len(b)-4:])) + if !isi64 || i64 < math.MinInt32 || i64 > math.MaxInt32 { + return nil, errBadInt32 + } + return int32(i64), nil case 64: - return int64(binary.BigEndian.Uint64(b[len(b)-8:])) + if !isi64 { + return nil, errBadInt64 + } + return i64, nil default: // the only case left for integer is int256 - // big.SetBytes can't tell if a number is negative or positive in itself. - // On EVM, if the returned number > max int256, it is negative. - // A number is > max int256 if the bit at position 255 is set. - ret := new(big.Int).SetBytes(b) - if ret.Bit(255) == 1 { - ret.Add(MaxUint256, new(big.Int).Neg(ret)) - ret.Add(ret, common.Big1) - ret.Neg(ret) - } - return ret + + return ret, nil } } @@ -162,6 +192,9 @@ func forTupleUnpack(t Type, output []byte) (interface{}, error) { virtualArgs := 0 for index, elem := range t.TupleElems { marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output) + if err != nil { + return nil, err + } if elem.T == ArrayTy && !isDynamicType(*elem) { // If we have a static array, like [3]uint256, these are coded as // just like uint256,uint256,uint256. @@ -179,9 +212,6 @@ func forTupleUnpack(t Type, output []byte) (interface{}, error) { // coded as just like uint256,bool,uint256 virtualArgs += getTypeSize(*elem)/32 - 1 } - if err != nil { - return nil, err - } retval.Field(index).Set(reflect.ValueOf(marshalledValue)) } return retval.Interface(), nil @@ -234,7 +264,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) { case StringTy: // variable arrays are written at the end of the return bytes return string(output[begin : begin+length]), nil case IntTy, UintTy: - return ReadInteger(t, returnOutput), nil + return ReadInteger(t, returnOutput) case BoolTy: return readBool(returnOutput) case AddressTy: diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go index b0be9e47c1..035c48573d 100644 --- a/accounts/abi/unpack_test.go +++ b/accounts/abi/unpack_test.go @@ -20,6 +20,7 @@ import ( "bytes" "encoding/hex" "fmt" + "math" "math/big" "reflect" "strconv" @@ -944,3 +945,164 @@ func TestOOMMaliciousInput(t *testing.T) { } } } + +func TestPackAndUnpackIncompatibleNumber(t *testing.T) { + var encodeABI Arguments + uint256Ty, err := NewType("uint256", "", nil) + if err != nil { + panic(err) + } + encodeABI = Arguments{ + {Type: uint256Ty}, + } + + maxU64, ok := new(big.Int).SetString(strconv.FormatUint(math.MaxUint64, 10), 10) + if !ok { + panic("bug") + } + maxU64Plus1 := new(big.Int).Add(maxU64, big.NewInt(1)) + cases := []struct { + decodeType string + inputValue *big.Int + err error + expectValue interface{} + }{ + { + decodeType: "uint8", + inputValue: big.NewInt(math.MaxUint8 + 1), + err: errBadUint8, + }, + { + decodeType: "uint8", + inputValue: big.NewInt(math.MaxUint8), + err: nil, + expectValue: uint8(math.MaxUint8), + }, + { + decodeType: "uint16", + inputValue: big.NewInt(math.MaxUint16 + 1), + err: errBadUint16, + }, + { + decodeType: "uint16", + inputValue: 
big.NewInt(math.MaxUint16), + err: nil, + expectValue: uint16(math.MaxUint16), + }, + { + decodeType: "uint32", + inputValue: big.NewInt(math.MaxUint32 + 1), + err: errBadUint32, + }, + { + decodeType: "uint32", + inputValue: big.NewInt(math.MaxUint32), + err: nil, + expectValue: uint32(math.MaxUint32), + }, + { + decodeType: "uint64", + inputValue: maxU64Plus1, + err: errBadUint64, + }, + { + decodeType: "uint64", + inputValue: maxU64, + err: nil, + expectValue: uint64(math.MaxUint64), + }, + { + decodeType: "uint256", + inputValue: maxU64Plus1, + err: nil, + expectValue: maxU64Plus1, + }, + { + decodeType: "int8", + inputValue: big.NewInt(math.MaxInt8 + 1), + err: errBadInt8, + }, + { + decodeType: "int8", + inputValue: big.NewInt(math.MinInt8 - 1), + err: errBadInt8, + }, + { + decodeType: "int8", + inputValue: big.NewInt(math.MaxInt8), + err: nil, + expectValue: int8(math.MaxInt8), + }, + { + decodeType: "int16", + inputValue: big.NewInt(math.MaxInt16 + 1), + err: errBadInt16, + }, + { + decodeType: "int16", + inputValue: big.NewInt(math.MinInt16 - 1), + err: errBadInt16, + }, + { + decodeType: "int16", + inputValue: big.NewInt(math.MaxInt16), + err: nil, + expectValue: int16(math.MaxInt16), + }, + { + decodeType: "int32", + inputValue: big.NewInt(math.MaxInt32 + 1), + err: errBadInt32, + }, + { + decodeType: "int32", + inputValue: big.NewInt(math.MinInt32 - 1), + err: errBadInt32, + }, + { + decodeType: "int32", + inputValue: big.NewInt(math.MaxInt32), + err: nil, + expectValue: int32(math.MaxInt32), + }, + { + decodeType: "int64", + inputValue: new(big.Int).Add(big.NewInt(math.MaxInt64), big.NewInt(1)), + err: errBadInt64, + }, + { + decodeType: "int64", + inputValue: new(big.Int).Sub(big.NewInt(math.MinInt64), big.NewInt(1)), + err: errBadInt64, + }, + { + decodeType: "int64", + inputValue: big.NewInt(math.MaxInt64), + err: nil, + expectValue: int64(math.MaxInt64), + }, + } + for i, testCase := range cases { + packed, err := encodeABI.Pack(testCase.inputValue) + if err != nil { + panic(err) + } + ty, err := NewType(testCase.decodeType, "", nil) + if err != nil { + panic(err) + } + decodeABI := Arguments{ + {Type: ty}, + } + decoded, err := decodeABI.Unpack(packed) + if err != testCase.err { + t.Fatalf("Expected error %v, actual error %v. case %d", testCase.err, err, i) + } + if err != nil { + continue + } + if !reflect.DeepEqual(decoded[0], testCase.expectValue) { + t.Fatalf("Expected value %v, actual value %v", testCase.expectValue, decoded[0]) + } + } +} diff --git a/build/checksums.txt b/build/checksums.txt index dff0c08c77..ca6bd4f6de 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -1,19 +1,19 @@ # This file contains sha256 checksums of optional build dependencies. 
-efd43e0f1402e083b73a03d444b7b6576bb4c539ac46208b63a916b69aca4088 go1.18.1.src.tar.gz -3703e9a0db1000f18c0c7b524f3d378aac71219b4715a6a4c5683eb639f41a4d go1.18.1.darwin-amd64.tar.gz -6d5641a06edba8cd6d425fb0adad06bad80e2afe0fa91b4aa0e5aed1bc78f58e go1.18.1.darwin-arm64.tar.gz -b9a9063d4265d8ccc046c9b314194d6eadc47e56d0d637db81e98e68aad45035 go1.18.1.freebsd-386.tar.gz -2bc1c138d645e37dbbc63517dd1cf1bf33fc4cb95f442a6384df0418b5134e9f go1.18.1.freebsd-amd64.tar.gz -9a8df5dde9058f08ac01ecfaae42534610db398e487138788c01da26a0d41ff9 go1.18.1.linux-386.tar.gz -b3b815f47ababac13810fc6021eb73d65478e0b2db4b09d348eefad9581a2334 go1.18.1.linux-amd64.tar.gz -56a91851c97fb4697077abbca38860f735c32b38993ff79b088dac46e4735633 go1.18.1.linux-arm64.tar.gz -9edc01c8e7db64e9ceeffc8258359e027812886ceca3444e83c4eb96ddb068ee go1.18.1.linux-armv6l.tar.gz -33db623d1eecf362fe365107c12efc90eff0b9609e0b3345e258388019cb552a go1.18.1.linux-ppc64le.tar.gz -5d9301324148ed4dbfaa0800da43a843ffd65c834ee73fcf087255697c925f74 go1.18.1.linux-s390x.tar.gz -49ae65551acbfaa57b52fbefa0350b2072512ae3103b8cf1a919a02626dbc743 go1.18.1.windows-386.zip -c30bc3f1f7314a953fe208bd9cd5e24bd9403392a6c556ced3677f9f70f71fe1 go1.18.1.windows-amd64.zip -2c4a8265030eac37f906634f5c13c22c3d0ea725f2488e1bca005c6b981653d7 go1.18.1.windows-arm64.zip +8e486e8e85a281fc5ce3f0bedc5b9d2dbf6276d7db0b25d3ec034f313da0375f go1.19.5.src.tar.gz +23d22bb6571bbd60197bee8aaa10e702f9802786c2e2ddce5c84527e86b66aa0 go1.19.5.darwin-amd64.tar.gz +4a67f2bf0601afe2177eb58f825adf83509511d77ab79174db0712dc9efa16c8 go1.19.5.darwin-arm64.tar.gz +b18a5e1e60130003896e1d1c8a9e142c7cccf6f5063d52c55dd42006434419c1 go1.19.5.freebsd-386.tar.gz +317996f7427691ff3a7ffd1b6aa089b9c66cd76f32e9107443f2f6aad1bb568a go1.19.5.freebsd-amd64.tar.gz +f68331aa7458a3598060595f5601d5731fd452bb2c62ff23095ddad68854e510 go1.19.5.linux-386.tar.gz +36519702ae2fd573c9869461990ae550c8c0d955cd28d2827a6b159fda81ff95 go1.19.5.linux-amd64.tar.gz +fc0aa29c933cec8d76f5435d859aaf42249aa08c74eb2d154689ae44c08d23b3 go1.19.5.linux-arm64.tar.gz +ec14f04bdaf4a62bdcf8b55b9b6434cc27c2df7d214d0bb7076a7597283b026a go1.19.5.linux-armv6l.tar.gz +e4032e7c52ebc48bad5c58ba8de0759b6091d9b1e59581a8a521c8c9d88dbe93 go1.19.5.linux-ppc64le.tar.gz +764871cbca841a99a24e239b63c68a4aaff4104658e3165e9ca450cac1fcbea3 go1.19.5.linux-s390x.tar.gz +8873f5871d996106b701febd979c5af022e6ea58bdbbb3817a28ab948b22c286 go1.19.5.windows-386.zip +167db91a2e40aeb453d3e59d213ecab06f62e1c4a84d13a06ccda1d999961caa go1.19.5.windows-amd64.zip +85a75555e82d8aa6f486d8d29491c593389682acce9f0c270090d5938eee30ef go1.19.5.windows-arm64.zip fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz 75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz diff --git a/build/ci.go b/build/ci.go index 44b01c6456..43d08b3374 100644 --- a/build/ci.go +++ b/build/ci.go @@ -31,8 +31,6 @@ Available commands are: importkeys -- imports signing keys from env debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package nsis -- creates a Windows NSIS installer - aar [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive - xcode [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore For all commands, -n prevents execution of external programs (dry run mode). 
@@ -40,7 +38,6 @@ For all commands, -n prevents execution of external programs (dry run mode). package main import ( - "bufio" "bytes" "encoding/base64" "flag" @@ -50,7 +47,6 @@ import ( "os/exec" "path" "path/filepath" - "regexp" "runtime" "strconv" "strings" @@ -138,7 +134,7 @@ var ( // This is the version of go that will be downloaded by // // go run ci.go install -dlgo - dlgoVersion = "1.18.5" + dlgoVersion = "1.19.5" ) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) @@ -174,10 +170,6 @@ func main() { doDebianSource(os.Args[2:]) case "nsis": doWindowsInstaller(os.Args[2:]) - case "aar": - doAndroidArchive(os.Args[2:]) - case "xcode": - doXCodeFramework(os.Args[2:]) case "purge": doPurge(os.Args[2:]) default: @@ -986,236 +978,6 @@ func doWindowsInstaller(cmdline []string) { } } -// Android archives - -func doAndroidArchive(cmdline []string) { - var ( - local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. ANDROID_SIGNING_KEY)`) - signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. ANDROID_SIGNIFY_KEY)`) - deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "https://oss.sonatype.org")`) - upload = flag.String("upload", "", `Destination to upload the archive (usually "gethstore/builds")`) - ) - flag.CommandLine.Parse(cmdline) - env := build.Env() - tc := new(build.GoToolchain) - - // Sanity check that the SDK and NDK are installed and set - if os.Getenv("ANDROID_HOME") == "" { - log.Fatal("Please ensure ANDROID_HOME points to your Android SDK") - } - - // Build gomobile. - install := tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile@latest", "golang.org/x/mobile/cmd/gobind@latest") - install.Env = append(install.Env) - build.MustRun(install) - - // Ensure all dependencies are available. This is required to make - // gomobile bind work because it expects go.sum to contain all checksums. 
- build.MustRun(tc.Go("mod", "download")) - - // Build the Android archive and Maven resources - build.MustRun(gomobileTool("bind", "-ldflags", "-s -w", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile")) - - if *local { - // If we're building locally, copy bundle to build dir and skip Maven - os.Rename("platon.aar", filepath.Join(GOBIN, "platon.aar")) - os.Rename("platon-sources.jar", filepath.Join(GOBIN, "platon-sources.jar")) - return - } - meta := newMavenMetadata(env) - build.Render("build/mvn.pom", meta.Package+".pom", 0755, meta) - - // Skip Maven deploy and Azure upload for PR builds - maybeSkipArchive(env) - - // Sign and upload the archive to Azure - archive := "platon-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar" - os.Rename("platon.aar", archive) - - if err := archiveUpload(archive, *upload, *signer, *signify); err != nil { - log.Fatal(err) - } - // Sign and upload all the artifacts to Maven Central - os.Rename(archive, meta.Package+".aar") - if *signer != "" && *deploy != "" { - // Import the signing key into the local GPG instance - key := getenvBase64(*signer) - gpg := exec.Command("gpg", "--import") - gpg.Stdin = bytes.NewReader(key) - build.MustRun(gpg) - keyID, err := build.PGPKeyID(string(key)) - if err != nil { - log.Fatal(err) - } - // Upload the artifacts to Sonatype and/or Maven Central - repo := *deploy + "/service/local/staging/deploy/maven2" - if meta.Develop { - repo = *deploy + "/content/repositories/snapshots" - } - build.MustRunCommand("mvn", "gpg:sign-and-deploy-file", "-e", "-X", - "-settings=build/mvn.settings", "-Durl="+repo, "-DrepositoryId=ossrh", - "-Dgpg.keyname="+keyID, - "-DpomFile="+meta.Package+".pom", "-Dfile="+meta.Package+".aar") - } -} - -func gomobileTool(subcmd string, args ...string) *exec.Cmd { - cmd := exec.Command(filepath.Join(GOBIN, "gomobile"), subcmd) - cmd.Args = append(cmd.Args, args...) 
- cmd.Env = []string{ - "PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"), - } - for _, e := range os.Environ() { - if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") || strings.HasPrefix(e, "GOBIN=") { - continue - } - cmd.Env = append(cmd.Env, e) - } - cmd.Env = append(cmd.Env, "GOBIN="+GOBIN) - return cmd -} - -type mavenMetadata struct { - Version string - Package string - Develop bool - Contributors []mavenContributor -} - -type mavenContributor struct { - Name string - Email string -} - -func newMavenMetadata(env build.Environment) mavenMetadata { - // Collect the list of authors from the repo root - contribs := []mavenContributor{} - if authors, err := os.Open("AUTHORS"); err == nil { - defer authors.Close() - - scanner := bufio.NewScanner(authors) - for scanner.Scan() { - // Skip any whitespace from the authors list - line := strings.TrimSpace(scanner.Text()) - if line == "" || line[0] == '#' { - continue - } - // Split the author and insert as a contributor - re := regexp.MustCompile("([^<]+) <(.+)>") - parts := re.FindStringSubmatch(line) - if len(parts) == 3 { - contribs = append(contribs, mavenContributor{Name: parts[1], Email: parts[2]}) - } - } - } - // Render the version and package strings - version := params.Version - if isUnstableBuild(env) { - version += "-SNAPSHOT" - } - return mavenMetadata{ - Version: version, - Package: "platon-" + version, - Develop: isUnstableBuild(env), - Contributors: contribs, - } -} - -// XCode frameworks - -func doXCodeFramework(cmdline []string) { - var ( - local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. IOS_SIGNING_KEY)`) - signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. IOS_SIGNIFY_KEY)`) - deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "trunk")`) - upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) - ) - flag.CommandLine.Parse(cmdline) - env := build.Env() - tc := new(build.GoToolchain) - - // Build gomobile. - build.MustRun(tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind")) - - // Build the iOS XCode framework - bind := gomobileTool("bind", "-ldflags", "-s -w", "--target", "ios", "-v", "github.com/PlatONnetwork/PlatON-Go/mobile") - - if *local { - // If we're building locally, use the build folder and stop afterwards - bind.Dir = GOBIN - build.MustRun(bind) - return - } - - // Create the archive. 
- maybeSkipArchive(env) - archive := "platon-" + archiveBasename("ios", params.ArchiveVersion(env.Commit)) - if err := os.MkdirAll(archive, 0755); err != nil { - log.Fatal(err) - } - bind.Dir, _ = filepath.Abs(archive) - build.MustRun(bind) - build.MustRunCommand("tar", "-zcvf", archive+".tar.gz", archive) - - // Sign and upload the framework to Azure - if err := archiveUpload(archive+".tar.gz", *upload, *signer, *signify); err != nil { - log.Fatal(err) - } - // Prepare and upload a PodSpec to CocoaPods - if *deploy != "" { - meta := newPodMetadata(env, archive) - build.Render("build/pod.podspec", "Geth.podspec", 0755, meta) - build.MustRunCommand("pod", *deploy, "push", "Geth.podspec", "--allow-warnings") - } -} - -type podMetadata struct { - Version string - Commit string - Archive string - Contributors []podContributor -} - -type podContributor struct { - Name string - Email string -} - -func newPodMetadata(env build.Environment, archive string) podMetadata { - // Collect the list of authors from the repo root - contribs := []podContributor{} - if authors, err := os.Open("AUTHORS"); err == nil { - defer authors.Close() - - scanner := bufio.NewScanner(authors) - for scanner.Scan() { - // Skip any whitespace from the authors list - line := strings.TrimSpace(scanner.Text()) - if line == "" || line[0] == '#' { - continue - } - // Split the author and insert as a contributor - re := regexp.MustCompile("([^<]+) <(.+)>") - parts := re.FindStringSubmatch(line) - if len(parts) == 3 { - contribs = append(contribs, podContributor{Name: parts[1], Email: parts[2]}) - } - } - } - version := params.Version - if isUnstableBuild(env) { - version += "-unstable." + env.Buildnum - } - return podMetadata{ - Archive: archive, - Version: version, - Commit: env.Commit, - Contributors: contribs, - } -} - // Binary distribution cleanups func doPurge(cmdline []string) { diff --git a/build/mvn.pom b/build/mvn.pom deleted file mode 100644 index 7670246ba9..0000000000 --- a/build/mvn.pom +++ /dev/null @@ -1,57 +0,0 @@ - - 4.0.0 - - org.ethereum - geth - {{.Version}} - aar - - Android Ethereum Client - Android port of the go-ethereum libraries and node - https://github.com/ethereum/go-ethereum - 2015 - - - - GNU Lesser General Public License, Version 3.0 - https://www.gnu.org/licenses/lgpl-3.0.en.html - repo - - - - - Ethereum - https://ethereum.org - - - - - karalabe - Péter Szilágyi - peterke@gmail.com - https://github.com/karalabe - - https://www.gravatar.com/avatar/2ecbf0f5b4b79eebf8c193e5d324357f?s=256 - - - - - {{range .Contributors}} - - {{.Name}} - {{.Email}} - {{end}} - - - - GitHub Issues - https://github.com/ethereum/go-ethereum/issues/ - - - - https://github.com/ethereum/go-ethereum - - diff --git a/build/mvn.settings b/build/mvn.settings deleted file mode 100644 index 406b409b9b..0000000000 --- a/build/mvn.settings +++ /dev/null @@ -1,24 +0,0 @@ - - - - ossrh - ${env.ANDROID_SONATYPE_USERNAME} - ${env.ANDROID_SONATYPE_PASSWORD} - - - - - ossrh - - true - - - gpg - - - - - diff --git a/build/pod.podspec b/build/pod.podspec deleted file mode 100644 index 2c14c280c7..0000000000 --- a/build/pod.podspec +++ /dev/null @@ -1,22 +0,0 @@ -Pod::Spec.new do |spec| - spec.name = 'Geth' - spec.version = '{{.Version}}' - spec.license = { :type => 'GNU Lesser General Public License, Version 3.0' } - spec.homepage = 'https://github.com/ethereum/go-ethereum' - spec.authors = { {{range .Contributors}} - '{{.Name}}' => '{{.Email}}',{{end}} - } - spec.summary = 'iOS Ethereum Client' - spec.source = { :git => 
'https://github.com/ethereum/go-ethereum.git', :commit => '{{.Commit}}' } - - spec.platform = :ios - spec.ios.deployment_target = '9.0' - spec.ios.vendored_frameworks = 'Frameworks/Geth.framework' - - spec.prepare_command = <<-CMD - curl https://gethstore.blob.core.windows.net/builds/{{.Archive}}.tar.gz | tar -xvz - mkdir Frameworks - mv {{.Archive}}/Geth.framework Frameworks - rm -rf {{.Archive}} - CMD -end diff --git a/build/tools/tools.go b/build/tools/tools.go index fd2681a28b..506e26eeff 100644 --- a/build/tools/tools.go +++ b/build/tools/tools.go @@ -24,8 +24,4 @@ import ( _ "github.com/fjl/gencodec" _ "github.com/golang/protobuf/protoc-gen-go" _ "golang.org/x/tools/cmd/stringer" - - // Tool imports for mobile build. - _ "golang.org/x/mobile/cmd/gobind" - _ "golang.org/x/mobile/cmd/gomobile" ) diff --git a/cmd/abigen/main.go b/cmd/abigen/main.go index 72cc74dc65..07c017fd89 100644 --- a/cmd/abigen/main.go +++ b/cmd/abigen/main.go @@ -72,7 +72,7 @@ var ( } langFlag = &cli.StringFlag{ Name: "lang", - Usage: "Destination language for the bindings (go, java, objc)", + Usage: "Destination language for the bindings (go)", Value: "go", } aliasFlag = &cli.StringFlag{ @@ -108,11 +108,6 @@ func abigen(c *cli.Context) error { switch c.String(langFlag.Name) { case "go": lang = bind.LangGo - case "java": - lang = bind.LangJava - case "objc": - lang = bind.LangObjC - utils.Fatalf("Objc binding generation is uncompleted") default: utils.Fatalf("Unsupported destination language \"%s\" (--lang)", c.String(langFlag.Name)) } diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go index 62fd0cfa28..ec7aa8edea 100644 --- a/cmd/bootnode/main.go +++ b/cmd/bootnode/main.go @@ -41,7 +41,7 @@ func main() { writeAddr = flag.Bool("writeaddress", false, "write out the node's pubkey hash and quit") nodeKeyFile = flag.String("nodekey", "", "private key filename") nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)") - natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:)") + natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|pmp:|extip:)") netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)") runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode") verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-5)") diff --git a/cmd/ctool/core/http_util.go b/cmd/ctool/core/http_util.go index 02ec708938..1e384b53ae 100644 --- a/cmd/ctool/core/http_util.go +++ b/cmd/ctool/core/http_util.go @@ -44,7 +44,7 @@ func HttpPost(param JsonParam) (string, error) { req, _ := json.Marshal(param) reqNew := bytes.NewBuffer(req) - request, _ := http.NewRequest("POST", config.Url, reqNew) + request, _ := http.NewRequest(http.MethodPost, config.Url, reqNew) request.Header.Set("Content-type", "application/json") response, err := client.Do(request) if response == nil && err != nil { diff --git a/cmd/platon/consolecmd.go b/cmd/platon/consolecmd.go index c1113040c4..b14e37a7c9 100644 --- a/cmd/platon/consolecmd.go +++ b/cmd/platon/consolecmd.go @@ -78,7 +78,7 @@ func localConsole(ctx *cli.Context) error { // Attach to the newly started node and create the JavaScript console client, err := stack.Attach() if err != nil { - return fmt.Errorf("Failed to attach to the inproc platon: %v", err) + return fmt.Errorf("failed to attach to the inproc platon: %v", err) } config := console.Config{ DataDir: utils.MakeDataDir(ctx), @@ -89,7 +89,7 @@ func localConsole(ctx *cli.Context) 
error { console, err := console.New(config) if err != nil { - return fmt.Errorf("Failed to start the JavaScript console: %v", err) + return fmt.Errorf("failed to start the JavaScript console: %v", err) } defer console.Stop(false) diff --git a/cmd/platon/consolecmd_test.go b/cmd/platon/consolecmd_test.go index 3712d94a1a..e531ad55a5 100644 --- a/cmd/platon/consolecmd_test.go +++ b/cmd/platon/consolecmd_test.go @@ -30,7 +30,7 @@ import ( ) const ( - ipcAPIs = "admin:1.0 debug:1.0 miner:1.0 net:1.0 personal:1.0 platon:1.0 rpc:1.0 txgen:1.0 txpool:1.0 web3:1.0" + ipcAPIs = "admin:1.0 debug:1.0 miner:1.0 net:1.0 platon:1.0 rpc:1.0 txgen:1.0 txpool:1.0 web3:1.0" httpAPIs = "net:1.0 platon:1.0 rpc:1.0 web3:1.0" ) diff --git a/cmd/platon/snapshot.go b/cmd/platon/snapshot.go index 27ac401ea9..00bf74d5c6 100644 --- a/cmd/platon/snapshot.go +++ b/cmd/platon/snapshot.go @@ -167,6 +167,8 @@ block is used. } ) +// Deprecation: this command should be deprecated once the hash-based +// scheme is deprecated. func pruneState(ctx *cli.Context) error { stack, config := makeConfigNode(ctx) defer stack.Close() @@ -397,10 +399,10 @@ func traverseRawState(ctx *cli.Context) error { nodes += 1 node := accIter.Hash() - // Check the present for non-empty hash node(embedded node doesn't + // Check the presence for non-empty hash node(embedded node doesn't // have their own hash). if node != (common.Hash{}) { - blob := rawdb.ReadTrieNode(chaindb, node) + blob := rawdb.ReadLegacyTrieNode(chaindb, node) if len(blob) == 0 { log.Error("Missing trie node(account)", "hash", node) return errors.New("missing account") @@ -436,7 +438,7 @@ func traverseRawState(ctx *cli.Context) error { // Check the present for non-empty hash node(embedded node doesn't // have their own hash). if node != (common.Hash{}) { - blob := rawdb.ReadTrieNode(chaindb, node) + blob := rawdb.ReadLegacyTrieNode(chaindb, node) if len(blob) == 0 { log.Error("Missing trie node(storage)", "hash", node) return errors.New("missing storage") diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index f392106750..2aee43f1e8 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -29,8 +29,6 @@ import ( "strings" "time" - gopsutil "github.com/shirou/gopsutil/mem" - "github.com/PlatONnetwork/PlatON-Go/accounts" "github.com/PlatONnetwork/PlatON-Go/accounts/keystore" "github.com/PlatONnetwork/PlatON-Go/common" @@ -38,6 +36,7 @@ import ( "github.com/PlatONnetwork/PlatON-Go/consensus" "github.com/PlatONnetwork/PlatON-Go/consensus/cbft/types" "github.com/PlatONnetwork/PlatON-Go/core" + "github.com/PlatONnetwork/PlatON-Go/core/rawdb" "github.com/PlatONnetwork/PlatON-Go/core/snapshotdb" types2 "github.com/PlatONnetwork/PlatON-Go/core/types" "github.com/PlatONnetwork/PlatON-Go/core/vm" @@ -66,7 +65,7 @@ import ( "github.com/PlatONnetwork/PlatON-Go/p2p/netutil" "github.com/PlatONnetwork/PlatON-Go/params" "github.com/PlatONnetwork/PlatON-Go/rpc" - + gopsutil "github.com/shirou/gopsutil/mem" "github.com/urfave/cli/v2" ) @@ -90,6 +89,12 @@ var ( Usage: "URL for remote database", Category: flags.LoggingCategory, } + DBEngineFlag = &cli.StringFlag{ + Name: "db.engine", + Usage: "Backing database implementation to use ('leveldb' or 'pebble')", + Value: "leveldb", + Category: flags.EthCategory, + } AncientFlag = &flags.DirectoryFlag{ Name: "datadir.ancient", Usage: "Root directory for ancient data (default = inside chaindata)", @@ -316,6 +321,11 @@ var ( Value: 2048, Category: flags.EthCategory, } + OverrideShanghai = &cli.Uint64Flag{ + Name: "override.shanghai", + Usage: 
"Manually specify the Shanghai fork timestamp, overriding the bundled setting", + Category: flags.EthCategory, + } CacheGCFlag = &cli.IntFlag{ Name: "cache.gc", Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)", @@ -559,6 +569,11 @@ var ( Usage: "Allow for unprotected (non EIP155 signed) transactions to be submitted via RPC", Category: flags.APICategory, } + EnablePersonal = &cli.BoolFlag{ + Name: "rpc.enabledeprecatedpersonal", + Usage: "Enables the (deprecated) personal namespace", + Category: flags.APICategory, + } // Network Settings MaxPeersFlag = &cli.IntFlag{ @@ -615,7 +630,7 @@ var ( } NATFlag = &cli.StringFlag{ Name: "nat", - Usage: "NAT port mapping mechanism (any|none|upnp|pmp|extip:)", + Usage: "NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:)", Value: "any", Category: flags.NetworkingCategory, } @@ -877,6 +892,12 @@ var ( } ) +func init() { + if rawdb.PebbleEnabled { + DatabasePathFlags = append(DatabasePathFlags, DBEngineFlag) + } +} + // MakeDataDir retrieves the currently requested data directory, terminating // if none (or the empty string) is specified. If the node is starting a testnet, // then a subdirectory of the specified datadir will be used. @@ -1253,6 +1274,10 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { cfg.JWTSecret = ctx.String(JWTSecretFlag.Name) } + if ctx.IsSet(EnablePersonal.Name) { + cfg.EnablePersonal = true + } + if ctx.IsSet(KeyStoreDirFlag.Name) { cfg.KeyStoreDir = ctx.String(KeyStoreDirFlag.Name) } @@ -1262,6 +1287,14 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { if ctx.IsSet(InsecureUnlockAllowedFlag.Name) { cfg.InsecureUnlockAllowed = ctx.Bool(InsecureUnlockAllowedFlag.Name) } + if ctx.IsSet(DBEngineFlag.Name) { + dbEngine := ctx.String(DBEngineFlag.Name) + if dbEngine != "leveldb" && dbEngine != "pebble" { + Fatalf("Invalid choice for db.engine '%s', allowed 'leveldb' or 'pebble'", dbEngine) + } + log.Info(fmt.Sprintf("Using %s as db engine", dbEngine)) + cfg.DBEngine = dbEngine + } } func SetDataDir(ctx *cli.Context, cfg *node.Config) { diff --git a/common/prque/lazyqueue.go b/common/prque/lazyqueue.go new file mode 100644 index 0000000000..840cd2ecc1 --- /dev/null +++ b/common/prque/lazyqueue.go @@ -0,0 +1,195 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package prque + +import ( + "container/heap" + "time" + + "github.com/PlatONnetwork/PlatON-Go/common/mclock" + "golang.org/x/exp/constraints" +) + +// LazyQueue is a priority queue data structure where priorities can change over +// time and are only evaluated on demand. 
+// Two callbacks are required: +// - priority evaluates the actual priority of an item +// - maxPriority gives an upper estimate for the priority in any moment between +// now and the given absolute time +// +// If the upper estimate is exceeded then Update should be called for that item. +// A global Refresh function should also be called periodically. +type LazyQueue[P constraints.Ordered, V any] struct { + clock mclock.Clock + // Items are stored in one of two internal queues ordered by estimated max + // priority until the next and the next-after-next refresh. Update and Refresh + // always places items in queue[1]. + queue [2]*sstack[P, V] + popQueue *sstack[P, V] + period time.Duration + maxUntil mclock.AbsTime + indexOffset int + setIndex SetIndexCallback[V] + priority PriorityCallback[P, V] + maxPriority MaxPriorityCallback[P, V] + lastRefresh1, lastRefresh2 mclock.AbsTime +} + +type ( + PriorityCallback[P constraints.Ordered, V any] func(data V) P // actual priority callback + MaxPriorityCallback[P constraints.Ordered, V any] func(data V, until mclock.AbsTime) P // estimated maximum priority callback +) + +// NewLazyQueue creates a new lazy queue +func NewLazyQueue[P constraints.Ordered, V any](setIndex SetIndexCallback[V], priority PriorityCallback[P, V], maxPriority MaxPriorityCallback[P, V], clock mclock.Clock, refreshPeriod time.Duration) *LazyQueue[P, V] { + q := &LazyQueue[P, V]{ + popQueue: newSstack[P, V](nil), + setIndex: setIndex, + priority: priority, + maxPriority: maxPriority, + clock: clock, + period: refreshPeriod, + lastRefresh1: clock.Now(), + lastRefresh2: clock.Now(), + } + q.Reset() + q.refresh(clock.Now()) + return q +} + +// Reset clears the contents of the queue +func (q *LazyQueue[P, V]) Reset() { + q.queue[0] = newSstack[P, V](q.setIndex0) + q.queue[1] = newSstack[P, V](q.setIndex1) +} + +// Refresh performs queue re-evaluation if necessary +func (q *LazyQueue[P, V]) Refresh() { + now := q.clock.Now() + for time.Duration(now-q.lastRefresh2) >= q.period*2 { + q.refresh(now) + q.lastRefresh2 = q.lastRefresh1 + q.lastRefresh1 = now + } +} + +// refresh re-evaluates items in the older queue and swaps the two queues +func (q *LazyQueue[P, V]) refresh(now mclock.AbsTime) { + q.maxUntil = now.Add(q.period) + for q.queue[0].Len() != 0 { + q.Push(heap.Pop(q.queue[0]).(*item[P, V]).value) + } + q.queue[0], q.queue[1] = q.queue[1], q.queue[0] + q.indexOffset = 1 - q.indexOffset + q.maxUntil = q.maxUntil.Add(q.period) +} + +// Push adds an item to the queue +func (q *LazyQueue[P, V]) Push(data V) { + heap.Push(q.queue[1], &item[P, V]{data, q.maxPriority(data, q.maxUntil)}) +} + +// Update updates the upper priority estimate for the item with the given queue index +func (q *LazyQueue[P, V]) Update(index int) { + q.Push(q.Remove(index)) +} + +// Pop removes and returns the item with the greatest actual priority +func (q *LazyQueue[P, V]) Pop() (V, P) { + var ( + resData V + resPri P + ) + q.MultiPop(func(data V, priority P) bool { + resData = data + resPri = priority + return false + }) + return resData, resPri +} + +// peekIndex returns the index of the internal queue where the item with the +// highest estimated priority is or -1 if both are empty +func (q *LazyQueue[P, V]) peekIndex() int { + if q.queue[0].Len() != 0 { + if q.queue[1].Len() != 0 && q.queue[1].blocks[0][0].priority > q.queue[0].blocks[0][0].priority { + return 1 + } + return 0 + } + if q.queue[1].Len() != 0 { + return 1 + } + return -1 +} + +// MultiPop pops multiple items from the queue and is more 
efficient than calling +// Pop multiple times. Popped items are passed to the callback. MultiPop returns +// when the callback returns false or there are no more items to pop. +func (q *LazyQueue[P, V]) MultiPop(callback func(data V, priority P) bool) { + nextIndex := q.peekIndex() + for nextIndex != -1 { + data := heap.Pop(q.queue[nextIndex]).(*item[P, V]).value + heap.Push(q.popQueue, &item[P, V]{data, q.priority(data)}) + nextIndex = q.peekIndex() + for q.popQueue.Len() != 0 && (nextIndex == -1 || q.queue[nextIndex].blocks[0][0].priority < q.popQueue.blocks[0][0].priority) { + i := heap.Pop(q.popQueue).(*item[P, V]) + if !callback(i.value, i.priority) { + for q.popQueue.Len() != 0 { + q.Push(heap.Pop(q.popQueue).(*item[P, V]).value) + } + return + } + nextIndex = q.peekIndex() // re-check because callback is allowed to push items back + } + } +} + +// PopItem pops the item from the queue only, dropping the associated priority value. +func (q *LazyQueue[P, V]) PopItem() V { + i, _ := q.Pop() + return i +} + +// Remove removes the item with the given index. +func (q *LazyQueue[P, V]) Remove(index int) V { + return heap.Remove(q.queue[index&1^q.indexOffset], index>>1).(*item[P, V]).value +} + +// Empty checks whether the priority queue is empty. +func (q *LazyQueue[P, V]) Empty() bool { + return q.queue[0].Len() == 0 && q.queue[1].Len() == 0 +} + +// Size returns the number of items in the priority queue. +func (q *LazyQueue[P, V]) Size() int { + return q.queue[0].Len() + q.queue[1].Len() +} + +// setIndex0 translates internal queue item index to the virtual index space of LazyQueue +func (q *LazyQueue[P, V]) setIndex0(data V, index int) { + if index == -1 { + q.setIndex(data, -1) + } else { + q.setIndex(data, index+index) + } +} + +// setIndex1 translates internal queue item index to the virtual index space of LazyQueue +func (q *LazyQueue[P, V]) setIndex1(data V, index int) { + q.setIndex(data, index+index+1) +} diff --git a/common/prque/lazyqueue_test.go b/common/prque/lazyqueue_test.go new file mode 100644 index 0000000000..445a9b6924 --- /dev/null +++ b/common/prque/lazyqueue_test.go @@ -0,0 +1,124 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
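For orientation, here is a minimal usage sketch of the generic LazyQueue added above. It is illustrative only and not part of the diff; the task type, package name, and callback bodies are hypothetical, and a real caller would compute a meaningful upper priority estimate in maxPriority.

package lazysketch // hypothetical package, for illustration only

import (
	"fmt"
	"time"

	"github.com/PlatONnetwork/PlatON-Go/common/mclock"
	"github.com/PlatONnetwork/PlatON-Go/common/prque"
)

// task is a hypothetical item whose priority the queue evaluates lazily.
type task struct {
	name     string
	priority int64
	index    int // maintained for us through the SetIndexCallback
}

func Example() {
	q := prque.NewLazyQueue[int64, *task](
		func(t *task, i int) { t.index = i },                            // setIndex
		func(t *task) int64 { return t.priority },                       // actual priority
		func(t *task, until mclock.AbsTime) int64 { return t.priority }, // upper estimate until 'until'
		mclock.System{},
		time.Second, // refresh period
	)
	q.Push(&task{name: "a", priority: 1})
	q.Push(&task{name: "b", priority: 5})

	q.Refresh() // call periodically so stale estimates get re-evaluated
	top, pri := q.Pop()
	fmt.Println(top.name, pri) // b 5
}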
+ +package prque + +import ( + "math/rand" + "sync" + "testing" + "time" + + "github.com/PlatONnetwork/PlatON-Go/common/mclock" +) + +const ( + testItems = 1000 + testPriorityStep = 100 + testSteps = 1000000 + testStepPeriod = time.Millisecond + testQueueRefresh = time.Second + testAvgRate = float64(testPriorityStep) / float64(testItems) / float64(testStepPeriod) +) + +type lazyItem struct { + p, maxp int64 + last mclock.AbsTime + index int +} + +func testPriority(a interface{}) int64 { + return a.(*lazyItem).p +} + +func testMaxPriority(a interface{}, until mclock.AbsTime) int64 { + i := a.(*lazyItem) + dt := until - i.last + i.maxp = i.p + int64(float64(dt)*testAvgRate) + return i.maxp +} + +func testSetIndex(a interface{}, i int) { + a.(*lazyItem).index = i +} + +func TestLazyQueue(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + clock := &mclock.Simulated{} + q := NewLazyQueue(testSetIndex, testPriority, testMaxPriority, clock, testQueueRefresh) + + var ( + items [testItems]lazyItem + maxPri int64 + ) + + for i := range items[:] { + items[i].p = rand.Int63n(testPriorityStep * 10) + if items[i].p > maxPri { + maxPri = items[i].p + } + items[i].index = -1 + q.Push(&items[i]) + } + + var ( + lock sync.Mutex + wg sync.WaitGroup + stopCh = make(chan chan struct{}) + ) + defer wg.Wait() + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-clock.After(testQueueRefresh): + lock.Lock() + q.Refresh() + lock.Unlock() + case <-stopCh: + return + } + } + }() + + for c := 0; c < testSteps; c++ { + i := rand.Intn(testItems) + lock.Lock() + items[i].p += rand.Int63n(testPriorityStep*2-1) + 1 + if items[i].p > maxPri { + maxPri = items[i].p + } + items[i].last = clock.Now() + if items[i].p > items[i].maxp { + q.Update(items[i].index) + } + if rand.Intn(100) == 0 { + p := q.PopItem().(*lazyItem) + if p.p != maxPri { + lock.Unlock() + close(stopCh) + t.Fatalf("incorrect item (best known priority %d, popped %d)", maxPri, p.p) + } + q.Push(p) + } + lock.Unlock() + clock.Run(testStepPeriod) + clock.WaitForTimers(1) + } + + close(stopCh) +} diff --git a/common/prque/prque.go b/common/prque/prque.go index dd5d94a0ca..0e8c9f897f 100755 --- a/common/prque/prque.go +++ b/common/prque/prque.go @@ -1,63 +1,77 @@ +// CookieJar - A contestant's algorithm toolbox +// Copyright (c) 2013 Peter Szilagyi. All rights reserved. +// +// CookieJar is dual licensed: use of this source code is governed by a BSD +// license that can be found in the LICENSE file. Alternatively, the CookieJar +// toolbox may be used in accordance with the terms and conditions contained +// in a signed written agreement between you and the author(s). + // This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque". +// Package prque implements a priority queue data structure supporting arbitrary +// value types and int64 priorities. +// +// If you would like to use a min-priority queue, simply negate the priorities. +// +// Internally the queue is based on the standard heap package working on a +// sortable version of the block based stack. package prque import ( "container/heap" + + "golang.org/x/exp/constraints" ) // Priority queue data structure. -type Prque struct { - cont *sstack +type Prque[P constraints.Ordered, V any] struct { + cont *sstack[P, V] } // New creates a new priority queue. 
-func New(setIndex SetIndexCallback) *Prque { - return &Prque{newSstack(setIndex)} +func New[P constraints.Ordered, V any](setIndex SetIndexCallback[V]) *Prque[P, V] { + return &Prque[P, V]{newSstack[P, V](setIndex)} } // Pushes a value with a given priority into the queue, expanding if necessary. -func (p *Prque) Push(data interface{}, priority int64) { - heap.Push(p.cont, &item{data, priority}) +func (p *Prque[P, V]) Push(data V, priority P) { + heap.Push(p.cont, &item[P, V]{data, priority}) } -// Peek returns the value with the greates priority but does not pop it off. -func (p *Prque) Peek() (interface{}, int64) { +// Peek returns the value with the greatest priority but does not pop it off. +func (p *Prque[P, V]) Peek() (V, P) { item := p.cont.blocks[0][0] return item.value, item.priority } -// Pops the value with the greates priority off the stack and returns it. +// Pops the value with the greatest priority off the stack and returns it. // Currently no shrinking is done. -func (p *Prque) Pop() (interface{}, int64) { - item := heap.Pop(p.cont).(*item) +func (p *Prque[P, V]) Pop() (V, P) { + item := heap.Pop(p.cont).(*item[P, V]) return item.value, item.priority } // Pops only the item from the queue, dropping the associated priority value. -func (p *Prque) PopItem() interface{} { - return heap.Pop(p.cont).(*item).value +func (p *Prque[P, V]) PopItem() V { + return heap.Pop(p.cont).(*item[P, V]).value } // Remove removes the element with the given index. -func (p *Prque) Remove(i int) interface{} { - if i < 0 { - return nil - } - return heap.Remove(p.cont, i) +func (p *Prque[P, V]) Remove(i int) V { + return heap.Remove(p.cont, i).(*item[P, V]).value } // Checks whether the priority queue is empty. -func (p *Prque) Empty() bool { +func (p *Prque[P, V]) Empty() bool { return p.cont.Len() == 0 } // Returns the number of element in the priority queue. -func (p *Prque) Size() int { +func (p *Prque[P, V]) Size() int { return p.cont.Len() } // Clears the contents of the priority queue. -func (p *Prque) Reset() { - *p = *New(p.cont.setIndex) +func (p *Prque[P, V]) Reset() { + *p = *New[P, V](p.cont.setIndex) } diff --git a/common/prque/prque_test.go b/common/prque/prque_test.go index ac346e961c..c4910f205a 100644 --- a/common/prque/prque_test.go +++ b/common/prque/prque_test.go @@ -1,53 +1,133 @@ -// Copyright 2021 The PlatON Network Authors -// This file is part of the PlatON-Go library. +// CookieJar - A contestant's algorithm toolbox +// Copyright (c) 2013 Peter Szilagyi. All rights reserved. // -// The PlatON-Go library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The PlatON-Go library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the PlatON-Go library. If not, see . +// CookieJar is dual licensed: use of this source code is governed by a BSD +// license that can be found in the LICENSE file. Alternatively, the CookieJar +// toolbox may be used in accordance with the terms and conditions contained +// in a signed written agreement between you and the author(s). 
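The generics change above keeps the old Prque behaviour (highest priority pops first) while removing the interface{} round-trips. A short sketch of the new call shape follows; it is illustrative only and not part of the diff. BlockChain, for example, now instantiates the queue as prque.New[int64, common.Hash](nil) later in this change set.

package prquesketch // hypothetical package, for illustration only

import (
	"fmt"

	"github.com/PlatONnetwork/PlatON-Go/common/prque"
)

func Example() {
	// Priorities may now be any ordered type, not just int64, and values are
	// strongly typed, so no type assertions are needed on Pop/PopItem.
	q := prque.New[int64, string](nil)
	q.Push("low", 1)
	q.Push("high", 42)

	v, p := q.Peek() // highest priority, not removed
	fmt.Println(v, p)

	for !q.Empty() {
		v, p := q.Pop()
		fmt.Println(v, p) // "high" 42, then "low" 1
	}
}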
package prque import ( + "math/rand" "testing" - - "github.com/stretchr/testify/assert" ) func TestPrque(t *testing.T) { - queue := New(nil) - assert.True(t, queue.Empty()) - queue.Push("item1", 1) - queue.Push("item5", 5) - queue.Push("item3", 3) - queue.Push("item2", 2) - queue.Push("item4", 4) - assert.False(t, queue.Empty()) - assert.Equal(t, queue.Size(), 5) - value, priority := queue.Pop() - assert.Equal(t, value, "item5") - assert.Equal(t, priority, int64(5)) - assert.Equal(t, queue.Size(), 4) - - value = queue.PopItem() - assert.Equal(t, value, "item4") - assert.Equal(t, queue.Size(), 3) - - queue.Remove(0) // remove item3 - value, priority = queue.Pop() - assert.Equal(t, value, "item2") - assert.Equal(t, priority, int64(2)) - assert.Equal(t, queue.Size(), 1) - - queue.Reset() - assert.True(t, queue.Empty()) - assert.Equal(t, queue.Size(), 0) + // Generate a batch of random data and a specific priority order + size := 16 * blockSize + prio := rand.Perm(size) + data := make([]int, size) + for i := 0; i < size; i++ { + data[i] = rand.Int() + } + queue := New[int, int](nil) + + for rep := 0; rep < 2; rep++ { + // Fill a priority queue with the above data + for i := 0; i < size; i++ { + queue.Push(data[i], prio[i]) + if queue.Size() != i+1 { + t.Errorf("queue size mismatch: have %v, want %v.", queue.Size(), i+1) + } + } + // Create a map the values to the priorities for easier verification + dict := make(map[int]int) + for i := 0; i < size; i++ { + dict[prio[i]] = data[i] + } + + // Pop out the elements in priority order and verify them + prevPrio := size + 1 + for !queue.Empty() { + val, prio := queue.Pop() + if prio > prevPrio { + t.Errorf("invalid priority order: %v after %v.", prio, prevPrio) + } + prevPrio = prio + if val != dict[prio] { + t.Errorf("push/pop mismatch: have %v, want %v.", val, dict[prio]) + } + delete(dict, prio) + } + } +} + +func TestReset(t *testing.T) { + // Generate a batch of random data and a specific priority order + size := 16 * blockSize + prio := rand.Perm(size) + data := make([]int, size) + for i := 0; i < size; i++ { + data[i] = rand.Int() + } + queue := New[int, int](nil) + + for rep := 0; rep < 2; rep++ { + // Fill a priority queue with the above data + for i := 0; i < size; i++ { + queue.Push(data[i], prio[i]) + if queue.Size() != i+1 { + t.Errorf("queue size mismatch: have %v, want %v.", queue.Size(), i+1) + } + } + // Create a map the values to the priorities for easier verification + dict := make(map[int]int) + for i := 0; i < size; i++ { + dict[prio[i]] = data[i] + } + // Pop out half the elements in priority order and verify them + prevPrio := size + 1 + for i := 0; i < size/2; i++ { + val, prio := queue.Pop() + if prio > prevPrio { + t.Errorf("invalid priority order: %v after %v.", prio, prevPrio) + } + prevPrio = prio + if val != dict[prio] { + t.Errorf("push/pop mismatch: have %v, want %v.", val, dict[prio]) + } + delete(dict, prio) + } + // Reset and ensure it's empty + queue.Reset() + if !queue.Empty() { + t.Errorf("priority queue not empty after reset: %v", queue) + } + } +} + +func BenchmarkPush(b *testing.B) { + // Create some initial data + data := make([]int, b.N) + prio := make([]int64, b.N) + for i := 0; i < len(data); i++ { + data[i] = rand.Int() + prio[i] = rand.Int63() + } + // Execute the benchmark + b.ResetTimer() + queue := New[int64, int](nil) + for i := 0; i < len(data); i++ { + queue.Push(data[i], prio[i]) + } +} + +func BenchmarkPop(b *testing.B) { + // Create some initial data + data := make([]int, b.N) + prio := 
make([]int64, b.N) + for i := 0; i < len(data); i++ { + data[i] = rand.Int() + prio[i] = rand.Int63() + } + queue := New[int64, int](nil) + for i := 0; i < len(data); i++ { + queue.Push(data[i], prio[i]) + } + // Execute the benchmark + b.ResetTimer() + for !queue.Empty() { + queue.Pop() + } } diff --git a/common/prque/sstack.go b/common/prque/sstack.go index a568cc1401..5dcd1d9dd0 100755 --- a/common/prque/sstack.go +++ b/common/prque/sstack.go @@ -1,52 +1,59 @@ +// CookieJar - A contestant's algorithm toolbox +// Copyright (c) 2013 Peter Szilagyi. All rights reserved. +// +// CookieJar is dual licensed: use of this source code is governed by a BSD +// license that can be found in the LICENSE file. Alternatively, the CookieJar +// toolbox may be used in accordance with the terms and conditions contained +// in a signed written agreement between you and the author(s). + // This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque". package prque +import "golang.org/x/exp/constraints" + // The size of a block of data const blockSize = 4096 // A prioritized item in the sorted stack. -// -// Note: priorities can "wrap around" the int64 range, a comes before b if (a.priority - b.priority) > 0. -// The difference between the lowest and highest priorities in the queue at any point should be less than 2^63. -type item struct { - value interface{} - priority int64 +type item[P constraints.Ordered, V any] struct { + value V + priority P } // SetIndexCallback is called when the element is moved to a new index. // Providing SetIndexCallback is optional, it is needed only if the application needs // to delete elements other than the top one. -type SetIndexCallback func(a interface{}, i int) +type SetIndexCallback[V any] func(data V, index int) // Internal sortable stack data structure. Implements the Push and Pop ops for // the stack (heap) functionality and the Len, Less and Swap methods for the // sortability requirements of the heaps. -type sstack struct { - setIndex SetIndexCallback +type sstack[P constraints.Ordered, V any] struct { + setIndex SetIndexCallback[V] size int capacity int offset int - blocks [][]*item - active []*item + blocks [][]*item[P, V] + active []*item[P, V] } // Creates a new, empty stack. -func newSstack(setIndex SetIndexCallback) *sstack { - result := new(sstack) +func newSstack[P constraints.Ordered, V any](setIndex SetIndexCallback[V]) *sstack[P, V] { + result := new(sstack[P, V]) result.setIndex = setIndex - result.active = make([]*item, blockSize) - result.blocks = [][]*item{result.active} + result.active = make([]*item[P, V], blockSize) + result.blocks = [][]*item[P, V]{result.active} result.capacity = blockSize return result } // Pushes a value onto the stack, expanding it if necessary. Required by // heap.Interface. -func (s *sstack) Push(data interface{}) { +func (s *sstack[P, V]) Push(data any) { if s.size == s.capacity { - s.active = make([]*item, blockSize) + s.active = make([]*item[P, V], blockSize) s.blocks = append(s.blocks, s.active) s.capacity += blockSize s.offset = 0 @@ -55,16 +62,16 @@ func (s *sstack) Push(data interface{}) { s.offset = 0 } if s.setIndex != nil { - s.setIndex(data.(*item).value, s.size) + s.setIndex(data.(*item[P, V]).value, s.size) } - s.active[s.offset] = data.(*item) + s.active[s.offset] = data.(*item[P, V]) s.offset++ s.size++ } // Pops a value off the stack and returns it. Currently no shrinking is done. // Required by heap.Interface. 
-func (s *sstack) Pop() (res interface{}) { +func (s *sstack[P, V]) Pop() (res any) { s.size-- s.offset-- if s.offset < 0 { @@ -73,24 +80,24 @@ func (s *sstack) Pop() (res interface{}) { } res, s.active[s.offset] = s.active[s.offset], nil if s.setIndex != nil { - s.setIndex(res.(*item).value, -1) + s.setIndex(res.(*item[P, V]).value, -1) } return } // Returns the length of the stack. Required by sort.Interface. -func (s *sstack) Len() int { +func (s *sstack[P, V]) Len() int { return s.size } // Compares the priority of two elements of the stack (higher is first). // Required by sort.Interface. -func (s *sstack) Less(i, j int) bool { - return (s.blocks[i/blockSize][i%blockSize].priority - s.blocks[j/blockSize][j%blockSize].priority) > 0 +func (s *sstack[P, V]) Less(i, j int) bool { + return s.blocks[i/blockSize][i%blockSize].priority > s.blocks[j/blockSize][j%blockSize].priority } // Swaps two elements in the stack. Required by sort.Interface. -func (s *sstack) Swap(i, j int) { +func (s *sstack[P, V]) Swap(i, j int) { ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize a, b := s.blocks[jb][jo], s.blocks[ib][io] if s.setIndex != nil { @@ -101,6 +108,6 @@ func (s *sstack) Swap(i, j int) { } // Resets the stack, effectively clearing its contents. -func (s *sstack) Reset() { - *s = *newSstack(s.setIndex) +func (s *sstack[P, V]) Reset() { + *s = *newSstack[P, V](s.setIndex) } diff --git a/common/prque/sstack_test.go b/common/prque/sstack_test.go new file mode 100644 index 0000000000..edc99955e8 --- /dev/null +++ b/common/prque/sstack_test.go @@ -0,0 +1,100 @@ +// CookieJar - A contestant's algorithm toolbox +// Copyright (c) 2013 Peter Szilagyi. All rights reserved. +// +// CookieJar is dual licensed: use of this source code is governed by a BSD +// license that can be found in the LICENSE file. Alternatively, the CookieJar +// toolbox may be used in accordance with the terms and conditions contained +// in a signed written agreement between you and the author(s). 
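Since sstack and item are unexported, the sketch below is written in-package; it only illustrates how Prque and LazyQueue drive the generic sorted stack through container/heap (the tests that follow exercise the same behaviour more thoroughly). It is not part of the diff.

package prque

import (
	"container/heap"
	"fmt"
)

// exampleSstack is an illustrative helper, not part of the diff.
func exampleSstack() {
	s := newSstack[int64, string](nil)
	heap.Push(s, &item[int64, string]{value: "low", priority: 1})
	heap.Push(s, &item[int64, string]{value: "high", priority: 42})

	top := heap.Pop(s).(*item[int64, string]) // highest priority surfaces first
	fmt.Println(top.value, top.priority)      // high 42
}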
+ +package prque + +import ( + "math/rand" + "sort" + "testing" +) + +func TestSstack(t *testing.T) { + // Create some initial data + size := 16 * blockSize + data := make([]*item[int64, int], size) + for i := 0; i < size; i++ { + data[i] = &item[int64, int]{rand.Int(), rand.Int63()} + } + stack := newSstack[int64, int](nil) + for rep := 0; rep < 2; rep++ { + // Push all the data into the stack, pop out every second + secs := []*item[int64, int]{} + for i := 0; i < size; i++ { + stack.Push(data[i]) + if i%2 == 0 { + secs = append(secs, stack.Pop().(*item[int64, int])) + } + } + rest := []*item[int64, int]{} + for stack.Len() > 0 { + rest = append(rest, stack.Pop().(*item[int64, int])) + } + // Make sure the contents of the resulting slices are ok + for i := 0; i < size; i++ { + if i%2 == 0 && data[i] != secs[i/2] { + t.Errorf("push/pop mismatch: have %v, want %v.", secs[i/2], data[i]) + } + if i%2 == 1 && data[i] != rest[len(rest)-i/2-1] { + t.Errorf("push/pop mismatch: have %v, want %v.", rest[len(rest)-i/2-1], data[i]) + } + } + } +} + +func TestSstackSort(t *testing.T) { + // Create some initial data + size := 16 * blockSize + data := make([]*item[int64, int], size) + for i := 0; i < size; i++ { + data[i] = &item[int64, int]{rand.Int(), int64(i)} + } + // Push all the data into the stack + stack := newSstack[int64, int](nil) + for _, val := range data { + stack.Push(val) + } + // Sort and pop the stack contents (should reverse the order) + sort.Sort(stack) + for _, val := range data { + out := stack.Pop() + if out != val { + t.Errorf("push/pop mismatch after sort: have %v, want %v.", out, val) + } + } +} + +func TestSstackReset(t *testing.T) { + // Create some initial data + size := 16 * blockSize + data := make([]*item[int64, int], size) + for i := 0; i < size; i++ { + data[i] = &item[int64, int]{rand.Int(), rand.Int63()} + } + stack := newSstack[int64, int](nil) + for rep := 0; rep < 2; rep++ { + // Push all the data into the stack, pop out every second + secs := []*item[int64, int]{} + for i := 0; i < size; i++ { + stack.Push(data[i]) + if i%2 == 0 { + secs = append(secs, stack.Pop().(*item[int64, int])) + } + } + // Reset and verify both pulled and stack contents + stack.Reset() + if stack.Len() != 0 { + t.Errorf("stack not empty after reset: %v", stack) + } + for i := 0; i < size; i++ { + if i%2 == 0 && data[i] != secs[i/2] { + t.Errorf("push/pop mismatch: have %v, want %v.", secs[i/2], data[i]) + } + } + } +} diff --git a/consensus/bft_mock.go b/consensus/bft_mock.go index 1c900832b6..98a3a399ce 100644 --- a/consensus/bft_mock.go +++ b/consensus/bft_mock.go @@ -270,8 +270,8 @@ func (bm *BftMock) Prepare(chain ChainReader, header *types.Header) error { // and assembles the final block. // Note: The block header and state database might be updated to reflect any // consensus rules that happen at finalization (e.g. block rewards). 
-func (bm *BftMock) Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, - receipts []*types.Receipt) (*types.Block, error) { +func (bm *BftMock) Finalize(chain ChainReader, header *types.Header, state *state.StateDB, + txs []*types.Transaction, receipts []*types.Receipt, withdrawals []*types.Withdrawal) (*types.Block, error) { header.Root = state.IntermediateRoot(true) // Header seems complete, assemble into a block and return diff --git a/consensus/cbft/cbft.go b/consensus/cbft/cbft.go index bd1ede06ef..1a6d37ff52 100644 --- a/consensus/cbft/cbft.go +++ b/consensus/cbft/cbft.go @@ -809,7 +809,8 @@ func (cbft *Cbft) Prepare(chain consensus.ChainReader, header *types.Header) err // Finalize implements consensus.Engine, no block // rewards given, and returns the final block. -func (cbft *Cbft) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, receipts []*types.Receipt) (*types.Block, error) { +func (cbft *Cbft) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, + txs []*types.Transaction, receipts []*types.Receipt, withdrawals []*types.Withdrawal) (*types.Block, error) { header.Root = state.IntermediateRoot(true) cbft.log.Debug("Finalize block", "hash", header.Hash(), "number", header.Number, "txs", len(txs), "receipts", len(receipts), "root", header.Root.String()) return types.NewBlock(header, txs, receipts, new(trie.Trie)), nil diff --git a/consensus/consensus.go b/consensus/consensus.go index f91ccff918..0b5adbad0e 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -102,7 +102,7 @@ type Engine interface { // Note: The block header and state database might be updated to reflect any // consensus rules that happen at finalization (e.g. block rewards). Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, - receipts []*types.Receipt) (*types.Block, error) + receipts []*types.Receipt, withdrawals []*types.Withdrawal) (*types.Block, error) // Seal generates a new sealing request for the given input block and pushes // the result into the given channel. diff --git a/console/console.go b/console/console.go index 2d07dd2e63..30e2d467c1 100644 --- a/console/console.go +++ b/console/console.go @@ -29,6 +29,8 @@ import ( "sync" "syscall" + "github.com/PlatONnetwork/PlatON-Go/log" + "github.com/PlatONnetwork/PlatON-Go/console/prompt" "github.com/PlatONnetwork/PlatON-Go/internal/jsre/deps" @@ -207,7 +209,7 @@ func (c *Console) initExtensions() error { if err != nil { return fmt.Errorf("api modules: %v", err) } - aliases := map[string]struct{}{"platon": {}, "personal": {}} + aliases := map[string]struct{}{"platon": {}} for api := range apis { if api == "web3" { continue @@ -252,6 +254,7 @@ func (c *Console) initPersonal(vm *goja.Runtime, bridge *bridge) { if personal == nil || c.prompter == nil { return } + log.Warn("Enabling deprecated personal namespace") jeth := vm.NewObject() vm.Set("jplaton", jeth) jeth.Set("openWallet", personal.Get("openWallet")) @@ -297,12 +300,8 @@ func (c *Console) AutoCompleteInput(line string, pos int) (string, []string, str start := pos - 1 for ; start > 0; start-- { // Skip all methods and namespaces (i.e. including the dot) - if line[start] == '.' || (line[start] >= 'a' && line[start] <= 'z') || (line[start] >= 'A' && line[start] <= 'Z') { - continue - } - // Handle web3 in a special way (i.e. 
other numbers aren't auto completed) - if start >= 3 && line[start-3:start] == "web3" { - start -= 3 + c := line[start] + if c == '.' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '1' && c <= '9') { continue } // We've hit an unexpected character, autocomplete form here diff --git a/core/asm/asm_test.go b/core/asm/asm_test.go index 92b26b67a5..e25e2942e4 100644 --- a/core/asm/asm_test.go +++ b/core/asm/asm_test.go @@ -17,58 +17,41 @@ package asm import ( - "testing" - "encoding/hex" + "testing" ) -// Tests disassembling the instructions for valid evm code -func TestInstructionIteratorValid(t *testing.T) { - cnt := 0 - script, _ := hex.DecodeString("61000000") - - it := NewInstructionIterator(script) - for it.Next() { - cnt++ - } - - if err := it.Error(); err != nil { - t.Errorf("Expected 2, but encountered error %v instead.", err) - } - if cnt != 2 { - t.Errorf("Expected 2, but got %v instead.", cnt) - } -} - -// Tests disassembling the instructions for invalid evm code -func TestInstructionIteratorInvalid(t *testing.T) { - cnt := 0 - script, _ := hex.DecodeString("6100") - - it := NewInstructionIterator(script) - for it.Next() { - cnt++ - } - - if it.Error() == nil { - t.Errorf("Expected an error, but got %v instead.", cnt) - } -} - -// Tests disassembling the instructions for empty evm code -func TestInstructionIteratorEmpty(t *testing.T) { - cnt := 0 - script, _ := hex.DecodeString("") - - it := NewInstructionIterator(script) - for it.Next() { - cnt++ - } - - if err := it.Error(); err != nil { - t.Errorf("Expected 0, but encountered error %v instead.", err) - } - if cnt != 0 { - t.Errorf("Expected 0, but got %v instead.", cnt) +// Tests disassembling instructions +func TestInstructionIterator(t *testing.T) { + for i, tc := range []struct { + want int + code string + wantErr string + }{ + {2, "61000000", ""}, // valid code + {0, "6100", "incomplete push instruction at 0"}, // invalid code + {2, "5900", ""}, // push0 + {0, "", ""}, // empty + + } { + var ( + have int + code, _ = hex.DecodeString(tc.code) + it = NewInstructionIterator(code) + ) + for it.Next() { + have++ + } + var haveErr = "" + if it.Error() != nil { + haveErr = it.Error().Error() + } + if haveErr != tc.wantErr { + t.Errorf("test %d: encountered error: %q want %q", i, haveErr, tc.wantErr) + continue + } + if have != tc.want { + t.Errorf("wrong instruction count, have %d want %d", have, tc.want) + } } } diff --git a/core/block_validator.go b/core/block_validator.go index 8636aa02e2..5cc2bb7ff7 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -70,7 +70,12 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { // Header validity is known at this point, check the transactions header := block.Header() if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash { - return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash) + return fmt.Errorf("transaction root hash mismatch (header value %x, calculated %x)", header.TxHash, hash) + } + if header.WithdrawalsHash != nil { + if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash { + return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash) + } } return nil } diff --git a/core/blockchain.go b/core/blockchain.go index 3f20048659..524bdd1f33 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -195,10 +195,12 @@ type BlockChain struct { chainConfig 
*params.ChainConfig // Chain & network configuration cacheConfig *CacheConfig // Cache configuration for pruning - db ethdb.Database // Low level persistent database to store final content in - snaps *snapshot.Tree // Snapshot tree for fast trie leaf access - triegc *prque.Prque // Priority queue mapping block numbers to tries to gc - gcproc time.Duration // Accumulates canonical block processing for trie dumping + db ethdb.Database // Low level persistent database to store final content in + snaps *snapshot.Tree // Snapshot tree for fast trie leaf access + triegc *prque.Prque[int64, common.Hash] // Priority queue mapping block numbers to tries to gc + gcproc time.Duration // Accumulates canonical block processing for trie dumping + triedb *trie.Database // The database handler for maintaining trie nodes. + stateCache state.Database // State database to reuse between imports (contains state cache) // txLookupLimit is the maximum number of blocks from head whose tx indices // are reserved: @@ -228,13 +230,12 @@ type BlockChain struct { currentBlock atomic.Value // Current head of the block chain currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!) - stateCache state.Database // State database to reuse between imports (contains state cache) - bodyCache *lru.Cache // Cache for the most recent block bodies - bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format - receiptsCache *lru.Cache // Cache for the most recent receipts per block - blockCache *lru.Cache // Cache for the most recent entire blocks - txLookupCache *lru.Cache // Cache for the most recent transaction lookup data. - futureBlocks *lru.Cache // future blocks are blocks added for later processing + bodyCache *lru.Cache // Cache for the most recent block bodies + bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format + receiptsCache *lru.Cache // Cache for the most recent receipts per block + blockCache *lru.Cache // Cache for the most recent entire blocks + txLookupCache *lru.Cache // Cache for the most recent transaction lookup data. + futureBlocks *lru.Cache // future blocks are blocks added for later processing wg sync.WaitGroup // quit chan struct{} // shutdown signal, closed in Stop. 
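A condensed sketch (hypothetical package and helper names, not part of the diff) of the trie-GC pattern the reworked fields enable: with the generic triegc, PopItem yields common.Hash directly, so the .(common.Hash) assertions used previously disappear.

package gcsketch // hypothetical, for illustration only

import (
	"github.com/PlatONnetwork/PlatON-Go/common"
	"github.com/PlatONnetwork/PlatON-Go/common/prque"
	"github.com/PlatONnetwork/PlatON-Go/trie"
)

// enqueueRoot references a freshly committed state root; the negated block
// number makes the oldest root surface first when popping for GC.
func enqueueRoot(triegc *prque.Prque[int64, common.Hash], triedb *trie.Database, root common.Hash, number uint64) {
	triedb.Reference(root, common.Hash{}) // metadata reference keeps the trie alive
	triegc.Push(root, -int64(number))
}

// dereferenceAll drains the queue on shutdown, as Stop does further down.
func dereferenceAll(triegc *prque.Prque[int64, common.Hash], triedb *trie.Database) {
	for !triegc.Empty() {
		triedb.Dereference(triegc.PopItem()) // common.Hash, no assertion needed
	}
}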
@@ -266,16 +267,19 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par txLookupCache, _ := lru.New(txLookupCacheLimit) futureBlocks, _ := lru.New(cacheConfig.MaxFutureBlocks) + // Open trie database with provided config + triedb := trie.NewDatabaseWithConfig(db, &trie.Config{ + Cache: cacheConfig.TrieCleanLimit, + Journal: cacheConfig.TrieCleanJournal, + Preimages: cacheConfig.Preimages, + }) + bc := &BlockChain{ - chainConfig: chainConfig, - cacheConfig: cacheConfig, - db: db, - triegc: prque.New(nil), - stateCache: state.NewDatabaseWithConfig(db, &trie.Config{ - Cache: cacheConfig.TrieCleanLimit, - Journal: cacheConfig.TrieCleanJournal, - Preimages: cacheConfig.Preimages, - }), + chainConfig: chainConfig, + cacheConfig: cacheConfig, + db: db, + triegc: prque.New[int64, common.Hash](nil), + triedb: triedb, quit: make(chan struct{}), chainmu: syncx.NewClosableMutex(), shouldPreserve: shouldPreserve, @@ -288,6 +292,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par engine: engine, vmConfig: vmConfig, } + bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb) bc.SetValidator(NewBlockValidator(chainConfig, bc, engine)) if chainConfig.Test() { @@ -329,7 +334,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par // Make sure the state associated with the block is available head := bc.CurrentBlock() - if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil { + if !bc.HasState(head.Root()) { log.Warn("Head state missing, repair not supported", "number", head.NumberU64(), "hash", head.Hash().String()) return nil, fmt.Errorf("head state missing, repair not supported, number:%d, hash:%s", head.NumberU64(), head.Hash().String()) /* @@ -419,7 +424,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par NoBuild: bc.cacheConfig.SnapshotNoBuild, AsyncBuild: !bc.cacheConfig.SnapshotWait, } - bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.stateCache.TrieDB(), head.Root()) + bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root()) } // Start future block processor. 
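The hunk above opens the trie database once and hands it to the state cache; a self-contained sketch of the same wiring follows (illustrative only; the package name and cache size are placeholders).

package setupsketch // hypothetical, for illustration only

import (
	"github.com/PlatONnetwork/PlatON-Go/core/rawdb"
	"github.com/PlatONnetwork/PlatON-Go/core/state"
	"github.com/PlatONnetwork/PlatON-Go/trie"
)

// newSharedTrieDB mirrors the constructor change: a single trie.Database is
// created up front and shared by the state cache (and, elsewhere in this
// change, the snapshot tree), instead of being reached through
// stateCache.TrieDB().
func newSharedTrieDB() (*trie.Database, state.Database) {
	db := rawdb.NewMemoryDatabase()
	triedb := trie.NewDatabaseWithConfig(db, &trie.Config{
		Cache:     256, // MB of clean cache; value is a placeholder
		Preimages: true,
	})
	return triedb, state.NewDatabaseWithNodeDB(db, triedb)
}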
@@ -439,11 +444,10 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute) bc.cacheConfig.TrieCleanRejournal = time.Minute } - triedb := bc.stateCache.TrieDB() bc.wg.Add(1) go func() { defer bc.wg.Done() - triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit) + bc.triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit) }() } return bc, nil @@ -693,7 +697,7 @@ func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error { if block == nil { return fmt.Errorf("non existent block [%x..]", hash[:4]) } - if _, err := trie.NewStateTrie(common.Hash{}, block.Root(), bc.stateCache.TrieDB()); err != nil { + if _, err := trie.NewStateTrie(common.Hash{}, block.Root(), bc.triedb); err != nil { return err } // If all checks out, manually set the head block @@ -902,7 +906,7 @@ func (bc *BlockChain) Stop() { // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle // - HEAD-127: So we have a hard limit on the number of blocks reexecuted if !bc.cacheConfig.Disabled { - triedb := bc.stateCache.TrieDB() + triedb := bc.triedb if 0 >= bc.cacheConfig.TriesInMemory { bc.cacheConfig.TriesInMemory = 128 } @@ -924,21 +928,20 @@ func (bc *BlockChain) Stop() { } } for !bc.triegc.Empty() { - triedb.Dereference(bc.triegc.PopItem().(common.Hash)) + triedb.Dereference(bc.triegc.PopItem()) } if size, _ := triedb.Size(); size != 0 { log.Error("Dangling trie nodes after full cleanup") } } // Flush the collected preimages to disk - if err := bc.stateCache.TrieDB().CommitPreimages(); err != nil { + if err := bc.triedb.CommitPreimages(); err != nil { log.Error("Failed to commit trie preimages", "err", err) } // Ensure all live cached entries be saved into disk, so that we can skip // cache warmup when node restarts. if bc.cacheConfig.TrieCleanJournal != "" { - triedb := bc.stateCache.TrieDB() - triedb.SaveCache(bc.cacheConfig.TrieCleanJournal) + bc.triedb.SaveCache(bc.cacheConfig.TrieCleanJournal) } log.Info("Blockchain stopped") } @@ -1297,7 +1300,6 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. // Irrelevant of the canonical status, write the block itself to the database rawdb.WriteBlock(bc.db, block) - triedb := bc.stateCache.TrieDB() root, err := state.Commit(true) if err != nil { @@ -1311,39 +1313,39 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. 
oversize := false if !(bc.cacheConfig.DBGCMpt && !bc.cacheConfig.DBDisabledGC.IsSet()) { //triedb.ReferenceVersion(root) - if err := triedb.Commit(root, false, false); err != nil { + if err := bc.triedb.Commit(root, false, false); err != nil { log.Error("Commit to triedb error", "root", root) return NonStatTy, err } //triedb.Dereference(currentBlock.Root()) - nodes, _ := triedb.Size() + nodes, _ := bc.triedb.Size() oversize = nodes > limit } else { - triedb.ReferenceVersion(root) - triedb.DereferenceDB(currentBlock.Root()) + bc.triedb.ReferenceVersion(root) + bc.triedb.DereferenceDB(currentBlock.Root()) - if err := triedb.Commit(root, false, false); err != nil { + if err := bc.triedb.Commit(root, false, false); err != nil { log.Error("Commit to triedb error", "root", root) return NonStatTy, err } - if triedb.UselessSize() > bc.cacheConfig.DBGCBlock { - triedb.UselessGC(1) + if bc.triedb.UselessSize() > bc.cacheConfig.DBGCBlock { + bc.triedb.UselessGC(1) } - nodes, _ := triedb.Size() + nodes, _ := bc.triedb.Size() oversize = nodes > limit } if oversize { - triedb.CapNode(limit * defaultCapNodePercent) - triedb.ResetUseless() + bc.triedb.CapNode(limit * defaultCapNodePercent) + bc.triedb.ResetUseless() } log.Debug("archive node commit stateDB trie", "blockNumber", block.NumberU64(), "blockHash", block.Hash().Hex(), "root", root.String()) } else { log.Debug("non-archive node put stateDB trie", "blockNumber", block.NumberU64(), "blockHash", block.Hash().Hex(), "root", root.String()) // Full but not archive node, do proper garbage collection - triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive + bc.triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive bc.triegc.Push(root, -int64(block.NumberU64())) if 0 >= bc.cacheConfig.TriesInMemory { @@ -1353,11 +1355,11 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. if current := block.NumberU64(); current > (uint64)(bc.cacheConfig.TriesInMemory) { // If we exceeded our memory allowance, flush matured singleton nodes to disk var ( - nodes, imgs = triedb.Size() + nodes, imgs = bc.triedb.Size() limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 ) if nodes > limit || imgs > 4*1024*1024 { - triedb.Cap(limit - ethdb.IdealBatchSize) + bc.triedb.Cap(limit - ethdb.IdealBatchSize) } // Find the next state trie we need to commit header := bc.GetHeaderByNumber(current - (uint64)(bc.cacheConfig.TriesInMemory)) @@ -1372,7 +1374,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. float64(chosen-lastWrite)/(float64)(bc.cacheConfig.TriesInMemory)) } // Flush an entire trie and restart the counters - triedb.Commit(header.Root, true, true) + bc.triedb.Commit(header.Root, true, true) lastWrite = chosen bc.gcproc = 0 } @@ -1383,7 +1385,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. 
bc.triegc.Push(root, number) break } - triedb.Dereference(root.(common.Hash)) + bc.triedb.Dereference(root) } } } @@ -1602,7 +1604,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er } blockInsertTimer.UpdateSince(start) - dirty, _ := bc.stateCache.TrieDB().Size() + dirty, _ := bc.triedb.Size() stats.report(chain, it.index, dirty) } diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index a0f2faa5d4..8afd7b97c6 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -345,6 +345,11 @@ func (bc *BlockChain) TxLookupLimit() uint64 { return bc.txLookupLimit } +// TrieDB retrieves the low level trie database used for data storage. +func (bc *BlockChain) TrieDB() *trie.Database { + return bc.triedb +} + // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index fa0adb7cb9..d7cdd2fd0f 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -58,7 +58,10 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo // Create a temporary persistent database datadir := t.TempDir() - db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false) + db, err := rawdb.Open(rawdb.OpenOptions{ + Directory: datadir, + AncientsDirectory: datadir, + }) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -243,7 +246,11 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { db.Close() // Start a new blockchain back up and see where the repair leads us - newdb, err := rawdb.NewLevelDBDatabaseWithFreezer(snaptest.datadir, 0, 0, snaptest.datadir, "", false) + newdb, err := rawdb.Open(rawdb.OpenOptions{ + Directory: snaptest.datadir, + AncientsDirectory: snaptest.datadir, + }) + if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index a4ad4da590..e068d4d73f 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -22,6 +22,8 @@ import ( "os" "testing" + "github.com/PlatONnetwork/PlatON-Go/eth/tracers/logger" + "github.com/PlatONnetwork/PlatON-Go/common" "github.com/PlatONnetwork/PlatON-Go/consensus" "github.com/PlatONnetwork/PlatON-Go/core/rawdb" @@ -1680,6 +1682,13 @@ func TestEIP1559Transition(t *testing.T) { t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual) } } +<<<<<<< HEAD +func TestEIP3651(t *testing.T) { + var ( + aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") + bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb") + engine = consensus.NewFaker() +======= // TestTransientStorageReset ensures the transient storage is wiped correctly // between transactions. 
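As used in the snapshot tests above, rawdb.Open with an OpenOptions struct replaces the old rawdb.NewLevelDBDatabaseWithFreezer call. A minimal sketch (the paths are placeholders; not part of the diff):

package main

import (
	"log"

	"github.com/PlatONnetwork/PlatON-Go/core/rawdb"
)

func main() {
	db, err := rawdb.Open(rawdb.OpenOptions{
		Directory:         "/tmp/platon-chaindata",         // chaindata path (placeholder)
		AncientsDirectory: "/tmp/platon-chaindata/ancient", // freezer path (placeholder)
	})
	if err != nil {
		log.Fatalf("failed to open database: %v", err)
	}
	defer db.Close()
}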
@@ -1787,6 +1796,7 @@ func TestEIP3651(t *testing.T) { bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb") db = rawdb.NewMemoryDatabase() //engine = consensus.NewFaker() +>>>>>>> 2afbce83a0df666af1e2d171ee9ab8256167eefa // A sender who makes transactions, has some funds key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -1831,6 +1841,17 @@ func TestEIP3651(t *testing.T) { } ) +<<<<<<< HEAD + gspec.Config.DiracBlock = big.NewInt(0) + signer := types.LatestSigner(gspec.Config, true) + + db := rawdb.NewMemoryDatabase() + blocks, _ := GenerateChain(gspec.Config, gspec.MustCommit(db), engine, db, 1, func(i int, b *BlockGen) { + b.SetCoinbase(aa) + // One transaction to Coinbase + txdata := &types.DynamicFeeTx{ + ChainID: gspec.Config.ChainID, +======= gspec.Config.PauliBlock = common.Big0 gspec.Config.DiracBlock = common.Big0 //gspec.Config.ShanghaiTime = u64(0) @@ -1844,6 +1865,7 @@ func TestEIP3651(t *testing.T) { // One transaction to Coinbase txdata := &types.DynamicFeeTx{ ChainID: gspec.Config.PIP7ChainID, +>>>>>>> 2afbce83a0df666af1e2d171ee9ab8256167eefa Nonce: 0, To: &bb, Gas: 500000, @@ -1861,8 +1883,11 @@ func TestEIP3651(t *testing.T) { if err != nil { t.Fatalf("failed to create tester chain: %v", err) } +<<<<<<< HEAD +======= engine.SetChain(chain) NewExecutor(gspec.Config, chain, chain.vmConfig, nil) +>>>>>>> 2afbce83a0df666af1e2d171ee9ab8256167eefa if n, err := chain.InsertChain(blocks); err != nil { t.Fatalf("block %d: failed to insert into chain: %v", n, err) } diff --git a/core/chain_makers.go b/core/chain_makers.go index 6d4a67f173..8e5e37be29 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -42,9 +42,10 @@ type BlockGen struct { header *types.Header statedb *state.StateDB - gasPool *GasPool - txs []*types.Transaction - receipts []*types.Receipt + gasPool *GasPool + txs []*types.Transaction + receipts []*types.Receipt + withdrawals []*types.Withdrawal config *params.ChainConfig engine consensus.Engine @@ -170,6 +171,26 @@ func (b *BlockGen) TxNonce(addr common.Address) uint64 { return b.statedb.GetNonce(addr) } +// AddWithdrawal adds a withdrawal to the generated block. +func (b *BlockGen) AddWithdrawal(w *types.Withdrawal) { + // The withdrawal will be assigned the next valid index. + var idx uint64 + for i := b.i - 1; i >= 0; i-- { + if wd := b.chain[i].Withdrawals(); len(wd) != 0 { + idx = wd[len(wd)-1].Index + 1 + break + } + if i == 0 { + // Correctly set the index if no parent had withdrawals + if wd := b.parent.Withdrawals(); len(wd) != 0 { + idx = wd[len(wd)-1].Index + 1 + } + } + } + w.Index = idx + b.withdrawals = append(b.withdrawals, w) +} + // PrevBlock returns a previously generated block by number. It panics if // num is greater or equal to the number of the block being generated. // For index -1, PrevBlock returns the parent block given to GenerateChain. 
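The new AddWithdrawal hook assigns the next valid withdrawal index itself, and GenerateChain (next hunk) forwards the collected withdrawals to engine.Finalize. Below is a sketch of a generator callback using it; the Withdrawal fields other than Index are assumed to follow upstream go-ethereum, and the values are placeholders.

package withdrawalsketch // hypothetical, for illustration only

import (
	"github.com/PlatONnetwork/PlatON-Go/common"
	"github.com/PlatONnetwork/PlatON-Go/core"
	"github.com/PlatONnetwork/PlatON-Go/core/types"
)

// withdrawalGen can be passed to GenerateChain as the per-block callback,
// e.g. GenerateChain(config, parent, engine, db, n, withdrawalGen).
func withdrawalGen(i int, b *core.BlockGen) {
	b.AddWithdrawal(&types.Withdrawal{
		// Index is filled in by AddWithdrawal; the remaining fields are
		// assumed to mirror upstream go-ethereum's types.Withdrawal.
		Validator: 1,
		Address:   common.HexToAddress("0x000000000000000000000000000000000000dead"),
		Amount:    1_000_000,
	})
}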
@@ -222,7 +243,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse } if b.engine != nil { // Finalize and seal the block - block, _ := b.engine.Finalize(chainreader, b.header, statedb, b.txs, b.receipts) + block, _ := b.engine.Finalize(chainreader, b.header, statedb, b.txs, b.receipts, b.withdrawals) // Write state changes to db root, err := statedb.Commit(true) @@ -280,7 +301,7 @@ func GenerateBlockChain2(config *params.ChainConfig, parent *types.Block, engine } if b.engine != nil { // Finalize and seal the block - block, _ := b.engine.Finalize(chainreader, b.header, statedb, b.txs, b.receipts) + block, _ := b.engine.Finalize(chainreader, b.header, statedb, b.txs, b.receipts, b.withdrawals) err := blockchain.WriteBlockWithState(block, b.receipts, nil, statedb, false, nil) if err != nil { @@ -322,7 +343,7 @@ func GenerateBlockChain3(config *params.ChainConfig, parent *types.Block, engine } if b.engine != nil { // Finalize and seal the block - block, _ := b.engine.Finalize(chainreader, b.header, statedb, b.txs, b.receipts) + block, _ := b.engine.Finalize(chainreader, b.header, statedb, b.txs, b.receipts, b.withdrawals) err := chain.WriteBlockWithState(block, b.receipts, nil, statedb, false, nil) if err != nil { @@ -377,7 +398,7 @@ func GenerateBlockChain(config *params.ChainConfig, parent *types.Block, engine } if b.engine != nil { // Finalize and seal the block - block, _ := b.engine.Finalize(chainreader, b.header, statedb, b.txs, b.receipts) + block, _ := b.engine.Finalize(chainreader, b.header, statedb, b.txs, b.receipts, b.withdrawals) err := blockchain.WriteBlockWithState(block, b.receipts, nil, statedb, false, nil) if err != nil { diff --git a/core/evm.go b/core/evm.go index 5f5fa4be83..6b8a63213b 100644 --- a/core/evm.go +++ b/core/evm.go @@ -64,7 +64,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext) vm.BlockContex GetNonce: GetNonceFn(header, chain), Coinbase: beneficiary, BlockNumber: new(big.Int).Set(header.Number), - Time: new(big.Int).SetUint64(header.Time), + Time: header.Time, BaseFee: baseFee, GasLimit: header.GasLimit, BlockHash: blockHash, diff --git a/core/genesis.go b/core/genesis.go index ca2cb9d9e9..5606cbe358 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -133,6 +133,11 @@ func (e *GenesisMismatchError) Error() string { return fmt.Sprintf("database already contains an incompatible genesis block (have %x, new %x)", e.Stored[:8], e.New[:8]) } +// ChainOverrides contains the changes to chain config. +type ChainOverrides struct { + OverrideShanghai *uint64 +} + // SetupGenesisBlock writes or updates the genesis block in db. 
// The block that will be used is: // @@ -463,6 +468,7 @@ func (g *Genesis) ToBlock(db ethdb.Database, sdb snapshotdb.BaseDB) *types.Block panic("Failed Store staking: " + err.Error()) } } + var withdrawals []*types.Withdrawal // 1.3.0 if gov.Gte130Version(genesisVersion) { if err := gov.WriteEcHash130(statedb); nil != err { @@ -470,6 +476,10 @@ func (g *Genesis) ToBlock(db ethdb.Database, sdb snapshotdb.BaseDB) *types.Block } } + if gov.Gte160Version(genesisVersion) { + withdrawals = make([]*types.Withdrawal, 0) + } + if g.Config != nil { if g.Config.AddressHRP != "" { statedb.SetString(vm.StakingContractAddr, rawdb.AddressHRPKey, g.Config.AddressHRP) @@ -511,7 +521,7 @@ func (g *Genesis) ToBlock(db ethdb.Database, sdb snapshotdb.BaseDB) *types.Block panic("Failed to trieDB commit by genesis: " + err.Error()) } - block := types.NewBlock(head, nil, nil, new(trie.Trie)) + block := types.NewBlock(head, nil, nil, new(trie.Trie)).WithWithdrawals(withdrawals) if err := sdb.SetCurrent(block.Hash(), *common.Big0, *common.Big0); nil != err { panic(fmt.Errorf("Failed to SetCurrent by snapshotdb. genesisHash: %s, error:%s", block.Hash().Hex(), err.Error())) diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 4897580360..c321dcb12f 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -655,9 +655,9 @@ func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, t return nil } -// ReadLogs retrieves the logs for all transactions in a block. The log fields -// are populated with metadata. In case the receipts or the block body -// are not found, a nil is returned. +// ReadLogs retrieves the logs for all transactions in a block. In case +// receipts is not found, a nil is returned. +// Note: ReadLogs does not derive unstored log fields. func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) [][]*types.Log { // Retrieve the flattened receipt slice data := ReadReceiptsRLP(db, hash, number) @@ -675,15 +675,6 @@ func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.C return nil } - body := ReadBody(db, hash, number) - if body == nil { - log.Error("Missing body but have receipt", "hash", hash, "number", number) - return nil - } - if err := deriveLogFields(receipts, hash, number, body.Transactions); err != nil { - log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err) - return nil - } logs := make([][]*types.Log, len(receipts)) for i, receipt := range receipts { logs[i] = receipt.Logs @@ -721,7 +712,7 @@ func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block { if body == nil { return nil } - return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.ExtraData) + return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.ExtraData).WithWithdrawals(body.Withdrawals) } // WriteBlock serializes a block into the database, header and body separately. 
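A caller-side sketch of the ReadLogs change above (illustrative, not part of the diff): only fields stored in the receipts, such as Address, Topics and Data, are populated now; derived fields like BlockHash or TxHash stay zero, so callers that need them must derive them from the receipts themselves.

package logsketch // hypothetical, for illustration only

import (
	"github.com/PlatONnetwork/PlatON-Go/common"
	"github.com/PlatONnetwork/PlatON-Go/core/rawdb"
	"github.com/PlatONnetwork/PlatON-Go/ethdb"
	"github.com/PlatONnetwork/PlatON-Go/params"
)

// collectTopics gathers all log topics of a block; it relies only on stored
// log fields, which is all ReadLogs guarantees after this change.
func collectTopics(db ethdb.Reader, hash common.Hash, number uint64, cfg *params.ChainConfig) []common.Hash {
	var topics []common.Hash
	for _, txLogs := range rawdb.ReadLogs(db, hash, number, cfg) {
		for _, l := range txLogs {
			topics = append(topics, l.Topics...)
		}
	}
	return topics
}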
@@ -815,7 +806,7 @@ func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block { } for _, bad := range badBlocks { if bad.Header.Hash() == hash { - return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.ExtraData) + return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.ExtraData).WithWithdrawals(bad.Body.Withdrawals) } } return nil @@ -834,7 +825,7 @@ func ReadAllBadBlocks(db ethdb.Reader) []*types.Block { } var blocks []*types.Block for _, bad := range badBlocks { - blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.ExtraData)) + blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.ExtraData).WithWithdrawals(bad.Body.Withdrawals)) } return blocks } diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 08a9e8f415..36567596f4 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -545,10 +545,6 @@ func TestReadLogs(t *testing.T) { t.Fatalf("unexpected number of logs[1] returned, have %d want %d", have, want) } - // Fill in log fields so we can compare their rlp encoding - if err := types.Receipts(receipts).DeriveFields(params.TestChainConfig, hash, 0, body.Transactions); err != nil { - t.Fatal(err) - } for i, pr := range receipts { for j, pl := range pr.Logs { rlpHave, err := rlp.EncodeToBytes(newFullLogRLP(logs[i][j])) diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go index 2faf0f8d65..c26f72cc96 100644 --- a/core/rawdb/accessors_state.go +++ b/core/rawdb/accessors_state.go @@ -28,6 +28,17 @@ func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte { return data } +// WritePreimages writes the provided set of preimages to the database. +func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) { + for hash, preimage := range preimages { + if err := db.Put(preimageKey(hash), preimage); err != nil { + log.Crit("Failed to store trie preimage", "err", err) + } + } + preimageCounter.Inc(int64(len(preimages))) + preimageHitCounter.Inc(int64(len(preimages))) +} + // ReadCode retrieves the contract code of the provided code hash. func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte { // Try with the prefixed code scheme first, if not then try with legacy @@ -48,12 +59,6 @@ func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte { return data } -// ReadTrieNode retrieves the trie node of the provided hash. -func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte { - data, _ := db.Get(hash.Bytes()) - return data -} - // HasCode checks if the contract code corresponding to the // provided code hash is present in the db. func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool { @@ -74,23 +79,6 @@ func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool { return ok } -// HasTrieNode checks if the trie node with the provided hash is present in db. -func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool { - ok, _ := db.Has(hash.Bytes()) - return ok -} - -// WritePreimages writes the provided set of preimages to the database. 
-func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) { - for hash, preimage := range preimages { - if err := db.Put(preimageKey(hash), preimage); err != nil { - log.Crit("Failed to store trie preimage", "err", err) - } - } - preimageCounter.Inc(int64(len(preimages))) - preimageHitCounter.Inc(int64(len(preimages))) -} - // WriteCode writes the provided contract code database. func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) { if err := db.Put(codeKey(hash), code); err != nil { @@ -98,23 +86,9 @@ func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) { } } -// WriteTrieNode writes the provided trie node database. -func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) { - if err := db.Put(hash.Bytes(), node); err != nil { - log.Crit("Failed to store trie node", "err", err) - } -} - // DeleteCode deletes the specified contract code from the database. func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) { if err := db.Delete(codeKey(hash)); err != nil { log.Crit("Failed to delete contract code", "err", err) } } - -// DeleteTrieNode deletes the specified trie node from the database. -func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) { - if err := db.Delete(hash.Bytes()); err != nil { - log.Crit("Failed to delete trie node", "err", err) - } -} diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go new file mode 100644 index 0000000000..a7a1fcbd10 --- /dev/null +++ b/core/rawdb/accessors_trie.go @@ -0,0 +1,263 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see + +package rawdb + +import ( + "fmt" + "sync" + + "github.com/PlatONnetwork/PlatON-Go/common" + "github.com/PlatONnetwork/PlatON-Go/crypto" + "github.com/PlatONnetwork/PlatON-Go/ethdb" + "github.com/PlatONnetwork/PlatON-Go/log" + "golang.org/x/crypto/sha3" +) + +// HashScheme is the legacy hash-based state scheme with which trie nodes are +// stored in the disk with node hash as the database key. The advantage of this +// scheme is that different versions of trie nodes can be stored in disk, which +// is very beneficial for constructing archive nodes. The drawback is it will +// store different trie nodes on the same path to different locations on the disk +// with no data locality, and it's unfriendly for designing state pruning. +// +// Now this scheme is still kept for backward compatibility, and it will be used +// for archive node and some other tries(e.g. light trie). +const HashScheme = "hashScheme" + +// PathScheme is the new path-based state scheme with which trie nodes are stored +// in the disk with node path as the database key. This scheme will only store one +// version of state data in the disk, which means that the state pruning operation +// is native. 
At the same time, this scheme will put adjacent trie nodes in the same +// area of the disk with good data locality property. But this scheme needs to rely +// on extra state diffs to survive deep reorg. +const PathScheme = "pathScheme" + +// nodeHasher used to derive the hash of trie node. +type nodeHasher struct{ sha crypto.KeccakState } + +var hasherPool = sync.Pool{ + New: func() interface{} { return &nodeHasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, +} + +func newNodeHasher() *nodeHasher { return hasherPool.Get().(*nodeHasher) } +func returnHasherToPool(h *nodeHasher) { hasherPool.Put(h) } + +func (h *nodeHasher) hashData(data []byte) (n common.Hash) { + h.sha.Reset() + h.sha.Write(data) + h.sha.Read(n[:]) + return n +} + +// ReadAccountTrieNode retrieves the account trie node and the associated node +// hash with the specified node path. +func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) { + data, err := db.Get(accountTrieNodeKey(path)) + if err != nil { + return nil, common.Hash{} + } + hasher := newNodeHasher() + defer returnHasherToPool(hasher) + return data, hasher.hashData(data) +} + +// HasAccountTrieNode checks the account trie node presence with the specified +// node path and the associated node hash. +func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) bool { + data, err := db.Get(accountTrieNodeKey(path)) + if err != nil { + return false + } + hasher := newNodeHasher() + defer returnHasherToPool(hasher) + return hasher.hashData(data) == hash +} + +// WriteAccountTrieNode writes the provided account trie node into database. +func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) { + if err := db.Put(accountTrieNodeKey(path), node); err != nil { + log.Crit("Failed to store account trie node", "err", err) + } +} + +// DeleteAccountTrieNode deletes the specified account trie node from the database. +func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) { + if err := db.Delete(accountTrieNodeKey(path)); err != nil { + log.Crit("Failed to delete account trie node", "err", err) + } +} + +// ReadStorageTrieNode retrieves the storage trie node and the associated node +// hash with the specified node path. +func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) ([]byte, common.Hash) { + data, err := db.Get(storageTrieNodeKey(accountHash, path)) + if err != nil { + return nil, common.Hash{} + } + hasher := newNodeHasher() + defer returnHasherToPool(hasher) + return data, hasher.hashData(data) +} + +// HasStorageTrieNode checks the storage trie node presence with the provided +// node path and the associated node hash. +func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte, hash common.Hash) bool { + data, err := db.Get(storageTrieNodeKey(accountHash, path)) + if err != nil { + return false + } + hasher := newNodeHasher() + defer returnHasherToPool(hasher) + return hasher.hashData(data) == hash +} + +// WriteStorageTrieNode writes the provided storage trie node into database. +func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) { + if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil { + log.Crit("Failed to store storage trie node", "err", err) + } +} + +// DeleteStorageTrieNode deletes the specified storage trie node from the database. 
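A hedged usage sketch of the path-based accessors above (kvdb, path, blob, wantHash and accountHash are placeholder names, not identifiers from the patch): writes are keyed by node path, and reads return the blob together with the hash recomputed from it, so callers can verify they got the version they expected.

rawdb.WriteAccountTrieNode(kvdb, path, blob)
data, hash := rawdb.ReadAccountTrieNode(kvdb, path)
if hash == wantHash {
	processNode(data) // processNode is a hypothetical consumer, not part of the patch
}

// Storage trie nodes additionally carry the owning account hash in their key.
rawdb.WriteStorageTrieNode(kvdb, accountHash, path, blob)
if !rawdb.HasStorageTrieNode(kvdb, accountHash, path, wantHash) {
	// Node is absent or does not match the expected hash.
}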
+func DeleteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte) { + if err := db.Delete(storageTrieNodeKey(accountHash, path)); err != nil { + log.Crit("Failed to delete storage trie node", "err", err) + } +} + +// ReadLegacyTrieNode retrieves the legacy trie node with the given +// associated node hash. +func ReadLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte { + data, err := db.Get(hash.Bytes()) + if err != nil { + return nil + } + return data +} + +// HasLegacyTrieNode checks if the trie node with the provided hash is present in db. +func HasLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool { + ok, _ := db.Has(hash.Bytes()) + return ok +} + +// WriteLegacyTrieNode writes the provided legacy trie node to database. +func WriteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) { + if err := db.Put(hash.Bytes(), node); err != nil { + log.Crit("Failed to store legacy trie node", "err", err) + } +} + +// DeleteLegacyTrieNode deletes the specified legacy trie node from database. +func DeleteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash) { + if err := db.Delete(hash.Bytes()); err != nil { + log.Crit("Failed to delete legacy trie node", "err", err) + } +} + +// HasTrieNode checks the trie node presence with the provided node info and +// the associated node hash. +func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) bool { + switch scheme { + case HashScheme: + return HasLegacyTrieNode(db, hash) + case PathScheme: + if owner == (common.Hash{}) { + return HasAccountTrieNode(db, path, hash) + } + return HasStorageTrieNode(db, owner, path, hash) + default: + panic(fmt.Sprintf("Unknown scheme %v", scheme)) + } +} + +// ReadTrieNode retrieves the trie node from database with the provided node info +// and associated node hash. +// hashScheme-based lookup requires the following: +// - hash +// +// pathScheme-based lookup requires the following: +// - owner +// - path +func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) []byte { + switch scheme { + case HashScheme: + return ReadLegacyTrieNode(db, hash) + case PathScheme: + var ( + blob []byte + nHash common.Hash + ) + if owner == (common.Hash{}) { + blob, nHash = ReadAccountTrieNode(db, path) + } else { + blob, nHash = ReadStorageTrieNode(db, owner, path) + } + if nHash != hash { + return nil + } + return blob + default: + panic(fmt.Sprintf("Unknown scheme %v", scheme)) + } +} + +// WriteTrieNode writes the trie node into database with the provided node info +// and associated node hash. +// hashScheme-based lookup requires the following: +// - hash +// +// pathScheme-based lookup requires the following: +// - owner +// - path +func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte, scheme string) { + switch scheme { + case HashScheme: + WriteLegacyTrieNode(db, hash, node) + case PathScheme: + if owner == (common.Hash{}) { + WriteAccountTrieNode(db, path, node) + } else { + WriteStorageTrieNode(db, owner, path, node) + } + default: + panic(fmt.Sprintf("Unknown scheme %v", scheme)) + } +} + +// DeleteTrieNode deletes the trie node from database with the provided node info +// and associated node hash. 
+// hashScheme-based lookup requires the following: +// - hash +// +// pathScheme-based lookup requires the following: +// - owner +// - path +func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, scheme string) { + switch scheme { + case HashScheme: + DeleteLegacyTrieNode(db, hash) + case PathScheme: + if owner == (common.Hash{}) { + DeleteAccountTrieNode(db, path) + } else { + DeleteStorageTrieNode(db, owner, path) + } + default: + panic(fmt.Sprintf("Unknown scheme %v", scheme)) + } +} diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go index 35f9c8f1d3..23b980b5fd 100644 --- a/core/rawdb/chain_freezer.go +++ b/core/rawdb/chain_freezer.go @@ -86,11 +86,10 @@ func (f *chainFreezer) Close() error { // This functionality is deliberately broken off from block importing to avoid // incurring additional data shuffling delays on block propagation. func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { - nfdb := &nofreezedb{KeyValueStore: db} - var ( backoff bool triggered chan struct{} // Used in tests + nfdb = &nofreezedb{KeyValueStore: db} ) timer := time.NewTimer(freezerRecheckInterval) defer timer.Stop() diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go index 299e8108f4..53390f5875 100644 --- a/core/rawdb/chain_iterator.go +++ b/core/rawdb/chain_iterator.go @@ -191,7 +191,7 @@ func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan // in to be [to-1]. Therefore, setting lastNum to means that the // prqueue gap-evaluation will work correctly lastNum = to - queue = prque.New(nil) + queue = prque.New[int64, *blockTxHashes](nil) // for stats reporting blocks, txs = 0, 0 ) @@ -210,7 +210,7 @@ func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan break } // Next block available, pop it off and index it - delivery := queue.PopItem().(*blockTxHashes) + delivery := queue.PopItem() lastNum = delivery.number WriteTxLookupEntries(batch, delivery.number, delivery.hashes) blocks++ @@ -282,7 +282,7 @@ func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt ch // we expect the first number to come in to be [from]. Therefore, setting // nextNum to from means that the prqueue gap-evaluation will work correctly nextNum = from - queue = prque.New(nil) + queue = prque.New[int64, *blockTxHashes](nil) // for stats reporting blocks, txs = 0, 0 ) @@ -299,7 +299,7 @@ func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt ch if hook != nil && !hook(nextNum) { break } - delivery := queue.PopItem().(*blockTxHashes) + delivery := queue.PopItem() nextNum = delivery.number + 1 DeleteTxLookupEntries(batch, delivery.hashes) txs += len(delivery.hashes) diff --git a/core/rawdb/database.go b/core/rawdb/database.go index b2124307e7..ee694df57f 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -22,6 +22,7 @@ import ( "fmt" "os" "path" + "path/filepath" "sync/atomic" "time" @@ -240,8 +241,8 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st if kvhash, _ := db.Get(headerHashKey(frozen)); len(kvhash) == 0 { // Subsequent header after the freezer limit is missing from the database. // Reject startup if the database has a more recent head. 
- if *ReadHeaderNumber(db, ReadHeadHeaderHash(db)) > frozen-1 { - return nil, fmt.Errorf("gap (#%d) in the chain between ancients and leveldb", frozen) + if ldbNum := *ReadHeaderNumber(db, ReadHeadHeaderHash(db)); ldbNum > frozen-1 { + return nil, fmt.Errorf("gap in the chain between ancients (#%d) and leveldb (#%d) ", frozen, ldbNum) } // Database contains only older data than the freezer, this happens if the // state was wiped and reinited from an existing freezer. @@ -260,7 +261,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st if kvblob, _ := db.Get(headerHashKey(1)); len(kvblob) == 0 { return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path") } - // Block #1 is still in the database, we're allowed to init a new feezer + // Block #1 is still in the database, we're allowed to init a new freezer } // Otherwise, the head header is still the genesis, we're allowed to init a new // freezer. @@ -301,19 +302,87 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string, r if err != nil { return nil, err } + log.Info("Using LevelDB as the backing database") return NewDatabase(db), nil } -// NewLevelDBDatabaseWithFreezer creates a persistent key-value database with a -// freezer moving immutable chain segments into cold storage. The passed ancient -// indicates the path of root ancient directory where the chain freezer can be -// opened. -func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, ancient string, namespace string, readonly bool) (ethdb.Database, error) { - kvdb, err := leveldb.New(file, cache, handles, namespace, readonly) +const ( + dbPebble = "pebble" + dbLeveldb = "leveldb" +) + +// hasPreexistingDb checks the given data directory whether a database is already +// instantiated at that location, and if so, returns the type of database (or the +// empty string). +func hasPreexistingDb(path string) string { + if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil { + return "" // No pre-existing db + } + if matches, err := filepath.Glob(filepath.Join(path, "OPTIONS*")); len(matches) > 0 || err != nil { + if err != nil { + panic(err) // only possible if the pattern is malformed + } + return dbPebble + } + return dbLeveldb +} + +// OpenOptions contains the options to apply when opening a database. +// OBS: If AncientsDirectory is empty, it indicates that no freezer is to be used. +type OpenOptions struct { + Type string // "leveldb" | "pebble" + Directory string // the datadir + AncientsDirectory string // the ancients-dir + Namespace string // the namespace for database relevant metrics + Cache int // the capacity(in megabytes) of the data caching + Handles int // number of files to be open simultaneously + ReadOnly bool + // Ephemeral means that filesystem sync operations should be avoided: data integrity in the face of + // a crash is not important. This option should typically be used in tests. + Ephemeral bool +} + +// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble. 
+// +// type == null type != null +// +---------------------------------------- +// db is non-existent | leveldb default | specified type +// db is existent | from db | specified type (if compatible) +func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) { + existingDb := hasPreexistingDb(o.Directory) + if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb { + return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb) + } + if o.Type == dbPebble || existingDb == dbPebble { + if PebbleEnabled { + log.Info("Using pebble as the backing database") + return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral) + } else { + return nil, errors.New("db.engine 'pebble' not supported on this platform") + } + } + if len(o.Type) != 0 && o.Type != dbLeveldb { + return nil, fmt.Errorf("unknown db.engine %v", o.Type) + } + log.Info("Using leveldb as the backing database") + // Use leveldb, either as default (no explicit choice), or pre-existing, or chosen explicitly + return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) +} + +// Open opens both a disk-based key-value database such as leveldb or pebble, but also +// integrates it with a freezer database -- if the AncientDir option has been +// set on the provided OpenOptions. +// The passed o.AncientDir indicates the path of root ancient directory where +// the chain freezer can be opened. +func Open(o OpenOptions) (ethdb.Database, error) { + kvdb, err := openKeyValueDatabase(o) if err != nil { return nil, err } - frdb, err := NewDatabaseWithFreezer(kvdb, ancient, namespace, readonly) + if len(o.AncientsDirectory) == 0 { + return kvdb, nil + } + frdb, err := NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly) if err != nil { kvdb.Close() return nil, err diff --git a/core/rawdb/databases_64bit.go b/core/rawdb/databases_64bit.go new file mode 100644 index 0000000000..49b9b4e87f --- /dev/null +++ b/core/rawdb/databases_64bit.go @@ -0,0 +1,37 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see + +//go:build arm64 || amd64 + +package rawdb + +import ( + "github.com/PlatONnetwork/PlatON-Go/ethdb" + "github.com/PlatONnetwork/PlatON-Go/ethdb/pebble" +) + +// Pebble is unsuported on 32bit architecture +const PebbleEnabled = true + +// NewPebbleDBDatabase creates a persistent key-value database without a freezer +// moving immutable chain segments into cold storage. 
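For illustration only (the directory paths and cache sizes below are made up), the consolidated Open/OpenOptions API introduced here replaces the per-engine constructors on the caller side:

db, err := rawdb.Open(rawdb.OpenOptions{
	Type:              "pebble", // or "leveldb"; empty keeps the pre-existing or default engine
	Directory:         "/data/platon/chaindata",
	AncientsDirectory: "/data/platon/chaindata/ancient", // empty string skips the freezer entirely
	Namespace:         "eth/db/chaindata/",
	Cache:             512,
	Handles:           256,
})
if err != nil {
	log.Crit("Failed to open chain database", "err", err)
}
defer db.Close()

If a database of a different engine already exists in Directory, Open rejects a mismatching Type rather than silently converting it.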
+func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool, ephemeral bool) (ethdb.Database, error) { + db, err := pebble.New(file, cache, handles, namespace, readonly, ephemeral) + if err != nil { + return nil, err + } + return NewDatabase(db), nil +} diff --git a/core/rawdb/databases_non64bit.go b/core/rawdb/databases_non64bit.go new file mode 100644 index 0000000000..b8ab2ecada --- /dev/null +++ b/core/rawdb/databases_non64bit.go @@ -0,0 +1,34 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +//go:build !(arm64 || amd64) + +package rawdb + +import ( + "errors" + + "github.com/ethereum/go-ethereum/ethdb" +) + +// Pebble is unsuported on 32bit architecture +const PebbleEnabled = false + +// NewPebbleDBDatabase creates a persistent key-value database without a freezer +// moving immutable chain segments into cold storage. +func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) { + return nil, errors.New("pebble is not supported on this platform") +} diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index 29271ca08c..56066e4cd0 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -862,7 +862,11 @@ func (t *freezerTable) advanceHead() error { return err } - // Close old file, and reopen in RDONLY mode. + // Commit the contents of the old file to stable storage and + // tear it down. It will be re-opened in read-only mode. + if err := t.head.Sync(); err != nil { + return err + } t.releaseFile(t.headId) t.openFile(t.headId, openFreezerFileForReadOnly) diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go index e9e1fc39d6..d65a3c0309 100644 --- a/core/rawdb/freezer_test.go +++ b/core/rawdb/freezer_test.go @@ -190,7 +190,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) { var item = make([]byte, 256) - for i := 0; i < 1000; i++ { + for i := 0; i < 10; i++ { // First reset and write 100 items. if err := f.TruncateHead(0); err != nil { t.Fatal("truncate failed:", err) diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 848f4aa919..aac33a1ff3 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -92,13 +92,17 @@ var ( SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value CodePrefix = []byte("c") // CodePrefix + code hash -> account code + // Path-based trie node scheme. 
+ trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node + trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node + PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage configPrefix = []byte("ethereum-config-") // config prefix for the db economicModelPrefix = []byte("economicModel-key-") // economicModel prefix for the db economicModelExtendPrefix = []byte("economicModelExtend-key-") // economicModelExtend prefix for the db - // Chain index prefixes (use `i` + single byte to avoid mixing data types). + // BloomBitsIndexPrefix is the data table of a chain indexer to track its progres BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil) @@ -213,3 +217,13 @@ func economicModelKey(hash common.Hash) []byte { func economicModelExtendKey(hash common.Hash) []byte { return append(economicModelExtendPrefix, hash.Bytes()...) } + +// accountTrieNodeKey = trieNodeAccountPrefix + nodePath. +func accountTrieNodeKey(path []byte) []byte { + return append(trieNodeAccountPrefix, path...) +} + +// storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath. +func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte { + return append(append(trieNodeStoragePrefix, accountHash.Bytes()...), path...) +} diff --git a/core/state/database.go b/core/state/database.go index 9b77d5e5c8..b07276022c 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -89,7 +89,7 @@ type Trie interface { // TryDeleteAccount abstracts an account deletion from the trie. TryDeleteAccount(key []byte) error - Commit(onleaf trie.LeafCallback) (common.Hash, int, error) + Commit(onleaf trie.LeafCallback) (common.Hash, int) Hash() common.Hash NodeIterator(startKey []byte) trie.NodeIterator GetKey([]byte) []byte // TODO(fjl): remove this when SecureTrie is removed @@ -109,28 +109,38 @@ func NewDatabase(db ethdb.Database) Database { func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { csc, _ := lru.New(codeSizeCacheSize) return &cachingDB{ - db: trie.NewDatabaseWithConfig(db, config), disk: db, codeSizeCache: csc, codeCache: fastcache.New(codeCacheSize), + triedb: trie.NewDatabaseWithConfig(db, config), + } +} + +// NewDatabaseWithNodeDB creates a state database with an already initialized node database. +func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database { + return &cachingDB{ + disk: db, + codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), + codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), + triedb: triedb, } } type cachingDB struct { - db *trie.Database disk ethdb.KeyValueStore codeSizeCache *lru.Cache codeCache *fastcache.Cache + triedb *trie.Database } // OpenTrie opens the main account trie at a specific root hash. func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { - return trie.NewStateTrie(common.Hash{}, root, db.db) + return trie.NewStateTrie(common.Hash{}, root, db.triedb) } // OpenStorageTrie opens the storage trie of an account. func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) { - return trie.NewStateTrie(addrHash, root, db.db) + return trie.NewStateTrie(addrHash, root, db.triedb) } // CopyTrie returns an independent copy of the given trie. 
@@ -198,5 +208,5 @@ func (db *cachingDB) DiskDB() ethdb.KeyValueStore { // TrieDB retrieves any intermediate trie-node caching layer. func (db *cachingDB) TrieDB() *trie.Database { - return db.db + return db.triedb } diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go index 2b8394e8fc..1de960befb 100644 --- a/core/state/iterator_test.go +++ b/core/state/iterator_test.go @@ -17,12 +17,12 @@ package state import ( - "bytes" "testing" - "github.com/PlatONnetwork/PlatON-Go/common/vm" + "github.com/PlatONnetwork/PlatON-Go/core/rawdb" "github.com/PlatONnetwork/PlatON-Go/common" + "github.com/PlatONnetwork/PlatON-Go/common/vm" ) var TestPlatONPrecompiledContracts = map[common.Address]interface{}{ @@ -49,10 +49,10 @@ func (pcc *TestPrecompiledContractCheck) IsPlatONPrecompiledContract(address com func TestNodeIteratorCoverage(t *testing.T) { vm.PrecompiledContractCheckInstance = &TestPrecompiledContractCheck{} // Create some arbitrary test state to iterate - db, root, _ := makeTestState() - db.TrieDB().Commit(root, false, true) + db, sdb, root, _ := makeTestState() + sdb.TrieDB().Commit(root, false, false) - state, err := New(root, db, nil) + state, err := New(root, sdb, nil) if err != nil { t.Fatalf("failed to create state trie at %x: %v", root, err) } @@ -63,29 +63,54 @@ func TestNodeIteratorCoverage(t *testing.T) { hashes[it.Hash] = struct{}{} } } - // Cross check the iterated hashes and the database/nodepool content - for hash := range hashes { - if _, err = db.TrieDB().Node(hash); err != nil { - _, err = db.ContractCode(common.Hash{}, hash) - } - if err != nil { - t.Errorf("failed to retrieve reported node %x", hash) - } - } - for _, hash := range db.TrieDB().Nodes() { - if _, ok := hashes[hash]; !ok { - t.Errorf("state entry not reported %x", hash) + // Check in-disk nodes + var ( + seenNodes = make(map[common.Hash]struct{}) + seenCodes = make(map[common.Hash]struct{}) + ) + it := db.NewIterator(nil, nil) + for it.Next() { + ok, hash := isTrieNode(sdb.TrieDB().Scheme(), it.Key(), it.Value()) + if !ok { + continue } + seenNodes[hash] = struct{}{} } - it := db.DiskDB().NewIterator(nil, nil) + it.Release() + + // Check in-disk codes + it = db.NewIterator(nil, nil) for it.Next() { - key := it.Key() - if bytes.HasPrefix(key, []byte("secure-key-")) { + ok, hash := rawdb.IsCodeKey(it.Key()) + if !ok { continue } - if _, ok := hashes[common.BytesToHash(key)]; !ok { - t.Errorf("state entry not reported %x", key) + if _, ok := hashes[common.BytesToHash(hash)]; !ok { + t.Errorf("state entry not reported %x", it.Key()) } + seenCodes[common.BytesToHash(hash)] = struct{}{} } it.Release() + + // Cross check the iterated hashes and the database/nodepool content + for hash := range hashes { + _, ok := seenNodes[hash] + if !ok { + _, ok = seenCodes[hash] + } + if !ok { + t.Errorf("failed to retrieve reported node %x", hash) + } + } +} + +// isTrieNode is a helper function which reports if the provided +// database entry belongs to a trie node or not. +func isTrieNode(scheme string, key, val []byte) (bool, common.Hash) { + if scheme == rawdb.HashScheme { + if len(key) == common.HashLength { + return true, common.BytesToHash(key) + } + } + return false, common.Hash{} } diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index e89d16ab7f..4836f50a92 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -279,7 +279,7 @@ func (p *Pruner) Prune(root common.Hash) error { // Ensure the root is really present. 
The weak assumption // is the presence of root can indicate the presence of the // entire trie. - if !rawdb.HasTrieNode(p.db, root) { + if !rawdb.HasLegacyTrieNode(p.db, root) { log.Error("Could not find the trie node corresponding to root", "root", root.TerminalString()) return fmt.Errorf("could not find the trie node corresponding to root:%s", root) } @@ -340,7 +340,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err } headBlock := rawdb.ReadHeadBlock(db) if headBlock == nil { - return errors.New("Failed to load head block") + return errors.New("failed to load head block") } // Initialize the snapshot tree in recovery mode to handle this special case: // - Users run the `prune-state` command multiple times diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go index 4e0ac23481..45bceba48a 100644 --- a/core/state/snapshot/conversion.go +++ b/core/state/snapshot/conversion.go @@ -43,7 +43,7 @@ type trieKV struct { type ( // trieGeneratorFn is the interface of trie generation which can // be implemented by different trie algorithm. - trieGeneratorFn func(db ethdb.KeyValueWriter, owner common.Hash, in chan (trieKV), out chan (common.Hash)) + trieGeneratorFn func(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan (trieKV), out chan (common.Hash)) // leafCallbackFn is the callback invoked at the leaves of the trie, // returns the subtrie root with the specified subtrie identifier. @@ -52,12 +52,12 @@ type ( // GenerateAccountTrieRoot takes an account iterator and reproduces the root hash. func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) { - return generateTrieRoot(nil, it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true) + return generateTrieRoot(nil, "", it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true) } // GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash. func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) { - return generateTrieRoot(nil, it, account, stackTrieGenerate, nil, newGenerateStats(), true) + return generateTrieRoot(nil, "", it, account, stackTrieGenerate, nil, newGenerateStats(), true) } // GenerateTrie takes the whole snapshot tree as the input, traverses all the @@ -71,7 +71,8 @@ func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethd } defer acctIt.Release() - got, err := generateTrieRoot(dst, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { + scheme := snaptree.triedb.Scheme() + got, err := generateTrieRoot(dst, scheme, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { // Migrate the code first, commit the contract code into the tmp db. 
if codeHash != emptyCode { code := rawdb.ReadCode(src, codeHash) @@ -87,7 +88,7 @@ func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethd } defer storageIt.Release() - hash, err := generateTrieRoot(dst, storageIt, accountHash, stackTrieGenerate, nil, stat, false) + hash, err := generateTrieRoot(dst, scheme, storageIt, accountHash, stackTrieGenerate, nil, stat, false) if err != nil { return common.Hash{}, err } @@ -136,7 +137,7 @@ func (stat *generateStats) progressAccounts(account common.Hash, done uint64) { stat.head = account } -// finishAccounts updates the gemerator stats for the finished account range. +// finishAccounts updates the generator stats for the finished account range. func (stat *generateStats) finishAccounts(done uint64) { stat.lock.Lock() defer stat.lock.Unlock() @@ -242,7 +243,7 @@ func runReport(stats *generateStats, stop chan bool) { // generateTrieRoot generates the trie hash based on the snapshot iterator. // It can be used for generating account trie, storage trie or even the // whole state which connects the accounts and the corresponding storages. -func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) { +func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) { var ( in = make(chan trieKV) // chan to pass leaves out = make(chan common.Hash, 1) // chan to collect result @@ -253,7 +254,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash, wg.Add(1) go func() { defer wg.Done() - generatorFn(db, account, in, out) + generatorFn(db, scheme, account, in, out) }() // Spin up a go-routine for progress logging if report && stats != nil { @@ -360,8 +361,14 @@ func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash, return stop(nil) } -func stackTrieGenerate(db ethdb.KeyValueWriter, owner common.Hash, in chan trieKV, out chan common.Hash) { - t := trie.NewStackTrieWithOwner(db, owner) +func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) { + var nodeWriter trie.NodeWriteFunc + if db != nil { + nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme) + } + } + t := trie.NewStackTrieWithOwner(nodeWriter, owner) for leaf := range in { t.TryUpdate(leaf.key[:], leaf.value) } diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index 9c6f62d87f..490af981cd 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -23,10 +23,8 @@ import ( "math/big" "time" - "github.com/PlatONnetwork/PlatON-Go/common/hexutil" - "github.com/PlatONnetwork/PlatON-Go/ethdb/memorydb" - "github.com/PlatONnetwork/PlatON-Go/common" + "github.com/PlatONnetwork/PlatON-Go/common/hexutil" "github.com/PlatONnetwork/PlatON-Go/core/rawdb" "github.com/PlatONnetwork/PlatON-Go/crypto" "github.com/PlatONnetwork/PlatON-Go/ethdb" @@ -360,16 +358,22 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, owner common.Hash, roo } // We use the snap data to build up a cache which can be used by the // main account trie as a primary lookup when resolving hashes - var snapNodeCache ethdb.KeyValueStore + var resolver trie.NodeResolver if 
len(result.keys) > 0 { - snapNodeCache = memorydb.New() - snapTrieDb := trie.NewDatabase(snapNodeCache) - snapTrie, _ := trie.New(owner, common.Hash{}, snapTrieDb) + mdb := rawdb.NewMemoryDatabase() + tdb := trie.NewDatabase(mdb) + snapTrie := trie.NewEmpty(tdb) for i, key := range result.keys { snapTrie.Update(key, result.vals[i]) } - root, _, _ := snapTrie.Commit(nil) - snapTrieDb.Commit(root, false, false) + root, nodes, err := snapTrie.Commit(nil) + if err == nil && nodes != nil { + tdb.Update(trie.NewWithNodeSet(nodes)) + tdb.Commit(root, false, false) + } + resolver = func(owner common.Hash, path []byte, hash common.Hash) []byte { + return rawdb.ReadTrieNode(mdb, owner, path, hash, tdb.Scheme()) + } } // Construct the trie for state iteration, reuse the trie // if it's already opened with some nodes resolved. @@ -398,7 +402,7 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, owner common.Hash, roo start = time.Now() internal time.Duration ) - nodeIt.AddResolver(snapNodeCache) + nodeIt.AddResolver(resolver) for iter.Next() { if last != nil && bytes.Compare(iter.Key, last) > 0 { diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go index 197bba9f5a..2aa9668270 100644 --- a/core/state/snapshot/generate_test.go +++ b/core/state/snapshot/generate_test.go @@ -28,7 +28,6 @@ import ( "github.com/PlatONnetwork/PlatON-Go/common" "github.com/PlatONnetwork/PlatON-Go/core/rawdb" "github.com/PlatONnetwork/PlatON-Go/ethdb" - "github.com/PlatONnetwork/PlatON-Go/ethdb/memorydb" "github.com/PlatONnetwork/PlatON-Go/log" "github.com/PlatONnetwork/PlatON-Go/rlp" "github.com/PlatONnetwork/PlatON-Go/trie" @@ -119,12 +118,12 @@ func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) { accIt := snap.AccountIterator(common.Hash{}) defer accIt.Release() - snapRoot, err := generateTrieRoot(nil, accIt, common.Hash{}, stackTrieGenerate, + snapRoot, err := generateTrieRoot(nil, "", accIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { storageIt, _ := snap.StorageIterator(accountHash, common.Hash{}) defer storageIt.Release() - hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false) + hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false) if err != nil { return common.Hash{}, err } @@ -377,7 +376,7 @@ func TestGenerateCorruptAccountTrie(t *testing.T) { // a fake one manually. We're going with a small account trie of 3 accounts, // without any storage slots to keep the test smaller. var ( - diskdb = memorydb.New() + diskdb = rawdb.NewMemoryDatabase() triedb = trie.NewDatabase(diskdb) ) tr, _ := trie.NewSecure(common.Hash{}, common.Hash{}, triedb) @@ -421,7 +420,7 @@ func TestGenerateMissingStorageTrie(t *testing.T) { // a fake one manually. We're going with a small account trie of 3 accounts, // two of which also has the same 3-slot storage trie attached. var ( - diskdb = memorydb.New() + diskdb = rawdb.NewMemoryDatabase() triedb = trie.NewDatabase(diskdb) ) stTrie, _ := trie.NewSecure(common.Hash{}, common.Hash{}, triedb) @@ -480,7 +479,7 @@ func TestGenerateCorruptStorageTrie(t *testing.T) { // a fake one manually. We're going with a small account trie of 3 accounts, // two of which also has the same 3-slot storage trie attached. 
var ( - diskdb = memorydb.New() + diskdb = rawdb.NewMemoryDatabase() triedb = trie.NewDatabase(diskdb) ) stTrie, _ := trie.NewSecure(common.Hash{}, common.Hash{}, triedb) diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index c42c16af21..1a6eeeb94d 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -781,14 +781,14 @@ func (t *Tree) Verify(root common.Hash) error { } defer acctIt.Release() - got, err := generateTrieRoot(nil, acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { + got, err := generateTrieRoot(nil, "", acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { storageIt, err := t.StorageIterator(root, accountHash, common.Hash{}) if err != nil { return common.Hash{}, err } defer storageIt.Release() - hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false) + hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false) if err != nil { return common.Hash{}, err } diff --git a/core/state/state_object.go b/core/state/state_object.go index ee8ef83bdb..8207a104ed 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -409,12 +409,10 @@ func (s *stateObject) CommitTrie(db Database) (int, error) { if metrics.EnabledExpensive { defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now()) } - root, committed, err := s.trie.Commit(nil) + root, committed := s.trie.Commit(nil) - if err == nil { - s.data.Root = root - } - return committed, err + s.data.Root = root + return committed, nil } // AddBalance adds amount to s's balance. diff --git a/core/state/statedb.go b/core/state/statedb.go index 3fc322532f..4fe503051f 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -230,6 +230,7 @@ func (s *StateDB) Reset(root common.Hash) error { s.stateObjects = make(map[common.Address]*stateObject) s.stateObjectsPending = make(map[common.Address]struct{}) s.stateObjectsDirty = make(map[common.Address]struct{}) + s.stateObjectsDestruct = make(map[common.Address]struct{}) s.thash = common.Hash{} s.txIndex = 0 s.logs = make(map[common.Hash][]*types.Log) @@ -1342,7 +1343,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { start = time.Now() } // Write trie changes. - root, accountCommitted, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash, _ []byte) error { + root, accountCommitted := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash, _ []byte) error { var account types.StateAccount if err := rlp.DecodeBytes(leaf, &account); err != nil { return nil @@ -1352,9 +1353,6 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { } return nil }) - if err != nil { - return common.Hash{}, err - } if metrics.EnabledExpensive { s.AccountCommits += time.Since(start) diff --git a/core/state/sync.go b/core/state/sync.go index 54a3e67299..34f0471f01 100644 --- a/core/state/sync.go +++ b/core/state/sync.go @@ -27,7 +27,7 @@ import ( ) // NewStateSync create a new state trie download scheduler. 
-func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(keys [][]byte, leaf []byte) error) *trie.Sync { +func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(keys [][]byte, leaf []byte) error, scheme string) *trie.Sync { // Register the storage slot callback if the external callback is specified. var onSlot func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error if onLeaf != nil { @@ -52,6 +52,6 @@ func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(k syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), path, parent, parentPath) return nil } - syncer = trie.NewSync(root, database, onAccount) + syncer = trie.NewSync(root, database, onAccount, scheme) return syncer } diff --git a/core/state/sync_test.go b/core/state/sync_test.go index e615d60433..267a4a7f85 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -40,10 +40,11 @@ type testAccount struct { } // makeTestState create a sample test state to test node-wise reconstruction. -func makeTestState() (Database, common.Hash, []*testAccount) { +func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) { // Create an empty state - db := NewDatabase(rawdb.NewMemoryDatabase()) - state, _ := New(common.Hash{}, db, nil) + db := rawdb.NewMemoryDatabase() + sdb := NewDatabase(db) + state, _ := New(common.Hash{}, sdb, nil) // Fill it with some arbitrary data var accounts []*testAccount @@ -74,7 +75,7 @@ func makeTestState() (Database, common.Hash, []*testAccount) { root, _ := state.Commit(false) // Return the generated state - return db, root, accounts + return db, sdb, root, accounts } // checkStateAccounts cross references a reconstructed state with an expected @@ -102,7 +103,7 @@ func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accou } // checkTrieConsistency checks that all nodes in a (sub-)trie are indeed present. -func checkTrieConsistency(db ethdb.KeyValueStore, root common.Hash) error { +func checkTrieConsistency(db ethdb.Database, root common.Hash) error { if v, _ := db.Get(root[:]); v == nil { return nil // Consider a non existent state consistent. } @@ -134,8 +135,9 @@ func checkStateConsistency(db ethdb.Database, root common.Hash) error { // Tests that an empty state is not scheduled for syncing. 
func TestEmptyStateSync(t *testing.T) { + db := trie.NewDatabase(rawdb.NewMemoryDatabase()) empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), nil) + sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), nil, db.Scheme()) if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 { t.Errorf("content requested for empty state: %v, %v, %v", nodes, paths, codes) } @@ -173,7 +175,7 @@ type stateElement struct { func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { vm.PrecompiledContractCheckInstance = &TestPrecompiledContractCheck{} // Create a random state to copy - srcDb, srcRoot, srcAccounts := makeTestState() + _, srcDb, srcRoot, srcAccounts := makeTestState() if commit { srcDb.TrieDB().Commit(srcRoot, false, true) } @@ -181,7 +183,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil) + sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) var ( nodeElements []stateElement @@ -283,11 +285,11 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { // partial results are returned, and the others sent only later. func TestIterativeDelayedStateSync(t *testing.T) { // Create a random state to copy - srcDb, srcRoot, srcAccounts := makeTestState() + _, srcDb, srcRoot, srcAccounts := makeTestState() // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil) + sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) var ( nodeElements []stateElement @@ -376,11 +378,11 @@ func TestIterativeRandomStateSyncBatched(t *testing.T) { testIterativeRandomS func testIterativeRandomStateSync(t *testing.T, count int) { // Create a random state to copy - srcDb, srcRoot, srcAccounts := makeTestState() + _, srcDb, srcRoot, srcAccounts := makeTestState() // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil) + sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) nodeQueue := make(map[string]stateElement) codeQueue := make(map[common.Hash]struct{}) @@ -456,11 +458,11 @@ func testIterativeRandomStateSync(t *testing.T, count int) { // partial results are returned (Even those randomly), others sent only later. func TestIterativeRandomDelayedStateSync(t *testing.T) { // Create a random state to copy - srcDb, srcRoot, srcAccounts := makeTestState() + _, srcDb, srcRoot, srcAccounts := makeTestState() // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil) + sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) nodeQueue := make(map[string]stateElement) codeQueue := make(map[common.Hash]struct{}) @@ -546,7 +548,7 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { // the database. 
func TestIncompleteStateSync(t *testing.T) { // Create a random state to copy - srcDb, srcRoot, srcAccounts := makeTestState() + db, srcDb, srcRoot, srcAccounts := makeTestState() // isCodeLookup to save some hashing var isCode = make(map[common.Hash]struct{}) @@ -556,15 +558,16 @@ func TestIncompleteStateSync(t *testing.T) { } } isCode[common.BytesToHash(emptyCodeHash)] = struct{}{} - checkTrieConsistency(srcDb.DiskDB(), srcRoot) + checkTrieConsistency(db, srcRoot) // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil) + sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) var ( - addedCodes []common.Hash - addedNodes []common.Hash + addedCodes []common.Hash + addedPaths []string + addedHashes []common.Hash ) nodeQueue := make(map[string]stateElement) codeQueue := make(map[common.Hash]struct{}) @@ -601,15 +604,16 @@ func TestIncompleteStateSync(t *testing.T) { var nodehashes []common.Hash if len(nodeQueue) > 0 { results := make([]trie.NodeSyncResult, 0, len(nodeQueue)) - for key, element := range nodeQueue { + for path, element := range nodeQueue { data, err := srcDb.TrieDB().Node(element.hash) if err != nil { t.Fatalf("failed to retrieve node data for %x", element.hash) } - results = append(results, trie.NodeSyncResult{Path: key, Data: data}) + results = append(results, trie.NodeSyncResult{Path: path, Data: data}) if element.hash != srcRoot { - addedNodes = append(addedNodes, element.hash) + addedPaths = append(addedPaths, element.path) + addedHashes = append(addedHashes, element.hash) } nodehashes = append(nodehashes, element.hash) } @@ -649,20 +653,18 @@ func TestIncompleteStateSync(t *testing.T) { } } // Sanity check that removing any node from the database is detected - for _, node := range addedCodes { - val := rawdb.ReadCode(dstDb, node) - rawdb.DeleteCode(dstDb, node) - if err := checkStateConsistency(dstDb, srcRoot); err == nil { - t.Errorf("trie inconsistency not caught, missing: %x", node) - } - rawdb.WriteCode(dstDb, node, val) - } - for _, node := range addedNodes { - val := rawdb.ReadTrieNode(dstDb, node) - rawdb.DeleteTrieNode(dstDb, node) + scheme := srcDb.TrieDB().Scheme() + for i, path := range addedPaths { + owner, inner := trie.ResolvePath([]byte(path)) + hash := addedHashes[i] + val := rawdb.ReadTrieNode(dstDb, owner, inner, hash, scheme) + if val == nil { + t.Error("missing trie node") + } + rawdb.DeleteTrieNode(dstDb, owner, inner, hash, scheme) if err := checkStateConsistency(dstDb, srcRoot); err == nil { - t.Errorf("trie inconsistency not caught, missing: %v", node.Hex()) + t.Errorf("trie inconsistency not caught, missing: %v", path) } - rawdb.WriteTrieNode(dstDb, node, val) + rawdb.WriteTrieNode(dstDb, owner, inner, hash, val, scheme) } } diff --git a/core/state_processor.go b/core/state_processor.go index 8b301d7735..18f2f52f97 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -109,9 +109,14 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg return nil, nil, 0, err } } - + isDirac := gov.Gte160VersionState(statedb) + // Fail if Shanghai not enabled and len(withdrawals) is non-zero. + withdrawals := block.Withdrawals() + if len(withdrawals) > 0 && !isDirac { + return nil, nil, 0, fmt.Errorf("withdrawals before dirac") + } // Finalize the block, applying any consensus engine specific extras (e.g. 
block rewards) - p.engine.Finalize(p.bc, header, statedb, block.Transactions(), receipts) + p.engine.Finalize(p.bc, header, statedb, block.Transactions(), receipts, withdrawals) return receipts, allLogs, *usedGas, nil } diff --git a/core/tx_pool.go b/core/tx_pool.go index e4b663a4a2..e8a9119ec6 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -27,7 +27,6 @@ import ( "time" "github.com/PlatONnetwork/PlatON-Go/consensus/misc" - "github.com/PlatONnetwork/PlatON-Go/x/gov" "github.com/PlatONnetwork/PlatON-Go/common" @@ -750,6 +749,10 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { if uint64(tx.Size()) > txMaxSize { return ErrOversizedData } + // Check whether the init code size has been exceeded. + if pool.shanghai && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize { + return fmt.Errorf("%w: code size %v limit %v", ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize) + } // Transactions can't be negative. This may never happen using RLP decoded // transactions but may occur if you create a transaction using the RPC. if tx.Value().Sign() < 0 { @@ -1611,7 +1614,7 @@ func (pool *TxPool) truncatePending() { pendingBeforeCap := pending // Assemble a spam order to penalize large transactors first - spammers := prque.New(nil) + spammers := prque.New[int64, common.Address](nil) for addr, list := range pool.pending { // Only evict transactions from high rollers txLength := list.Len() @@ -1624,12 +1627,12 @@ func (pool *TxPool) truncatePending() { for pending > pool.config.GlobalSlots && !spammers.Empty() { // Retrieve the next offender if not local address offender, _ := spammers.Pop() - offenders = append(offenders, offender.(common.Address)) + offenders = append(offenders, offender) // Equalize balances until all the same or below threshold if len(offenders) > 1 { // Calculate the equalization threshold for all current offenders - threshold := pool.pending[offender.(common.Address)].Len() + threshold := pool.pending[offender].Len() // Iteratively reduce all offenders until below limit or threshold reached for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold { diff --git a/core/types/block.go b/core/types/block.go index 9867579598..f0fc946e4f 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -130,6 +130,9 @@ type Header struct { // BaseFee was added by EIP-1559 and is ignored in legacy headers. BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` + // WithdrawalsHash was added by EIP-4895 and is ignored in legacy headers. + WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + // caches sealHash atomic.Value `json:"-" rlp:"-"` hash atomic.Value `json:"-" rlp:"-"` @@ -341,9 +344,12 @@ func (h *Header) ExtraData() []byte { } // EmptyBody returns true if there is no additional 'body' to complete the header -// that is: no transactions and no uncles. +// that is: no transactions, no uncles and no withdrawals. func (h *Header) EmptyBody() bool { - return h.TxHash == EmptyRootHash + if h.WithdrawalsHash == nil { + return h.TxHash == EmptyRootHash + } + return h.TxHash == EmptyRootHash && *h.WithdrawalsHash == EmptyRootHash } // EmptyReceipts returns true if there are no receipts for this header/block. @@ -356,12 +362,14 @@ func (h *Header) EmptyReceipts() bool { type Body struct { Transactions []*Transaction ExtraData []byte + Withdrawals []*Withdrawal `rlp:"optional"` } // Block represents an entire block in the Ethereum blockchain. 
type Block struct { header *Header transactions Transactions + withdrawals Withdrawals // caches hash atomic.Value @@ -378,9 +386,10 @@ type Block struct { // "external" block encoding. used for eth protocol, etc. type extblock struct { - Header *Header - Txs []*Transaction - ExtraData []byte + Header *Header + Txs []*Transaction + ExtraData []byte + Withdrawals []*Withdrawal `rlp:"optional"` } // NewBlock creates a new block. The input data is copied, @@ -412,6 +421,28 @@ func NewBlock(header *Header, txs []*Transaction, receipts []*Receipt, hasher Tr return b } +// NewBlockWithWithdrawals creates a new block with withdrawals. The input data +// is copied, changes to header and to the field values will not +// affect the block. +// +// The values of TxHash, UncleHash, ReceiptHash and Bloom in header +// are ignored and set to values derived from the given txs, uncles +// and receipts. +func NewBlockWithWithdrawals(header *Header, txs []*Transaction, receipts []*Receipt, withdrawals []*Withdrawal, hasher TrieHasher) *Block { + b := NewBlock(header, txs, receipts, hasher) + + if withdrawals == nil { + b.header.WithdrawalsHash = nil + } else if len(withdrawals) == 0 { + b.header.WithdrawalsHash = &EmptyRootHash + } else { + h := DeriveSha(Withdrawals(withdrawals), hasher) + b.header.WithdrawalsHash = &h + } + + return b.WithWithdrawals(withdrawals) +} + // NewBlockWithHeader creates a block with the given header data. The // header data is copied, changes to header and to the field values // will not affect the block. @@ -443,6 +474,9 @@ func CopyHeader(h *Header) *Header { cpy.Extra = make([]byte, len(h.Extra)) copy(cpy.Extra, h.Extra) } + if h.WithdrawalsHash != nil { + *cpy.WithdrawalsHash = *h.WithdrawalsHash + } return &cpy } @@ -453,7 +487,7 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error { if err := s.Decode(&eb); err != nil { return err } - b.header, b.transactions, b.extraData = eb.Header, eb.Txs, eb.ExtraData + b.header, b.transactions, b.extraData, b.withdrawals = eb.Header, eb.Txs, eb.ExtraData, eb.Withdrawals b.size.Store(common.StorageSize(rlp.ListSize(size))) return nil } @@ -461,9 +495,10 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error { // EncodeRLP serializes b into the Ethereum RLP block format. func (b *Block) EncodeRLP(w io.Writer) error { return rlp.Encode(w, extblock{ - Header: b.header, - Txs: b.transactions, - ExtraData: b.extraData, + Header: b.header, + Txs: b.transactions, + ExtraData: b.extraData, + Withdrawals: b.withdrawals, }) } @@ -501,10 +536,14 @@ func (b *Block) BaseFee() *big.Int { return new(big.Int).Set(b.header.BaseFee) } +func (b *Block) Withdrawals() Withdrawals { + return b.withdrawals +} + func (b *Block) Header() *Header { return CopyHeader(b.header) } // Body returns the non-header content of the block. -func (b *Block) Body() *Body { return &Body{b.transactions, b.extraData} } +func (b *Block) Body() *Body { return &Body{b.transactions, b.extraData, b.withdrawals} } // Size returns the true RLP encoded storage size of the block, either by encoding // and returning it, or returning a previsouly cached value. @@ -539,6 +578,7 @@ func (b *Block) WithSeal(header *Header) *Block { return &Block{ header: &cpy, transactions: b.transactions, + withdrawals: b.withdrawals, } } @@ -554,6 +594,15 @@ func (b *Block) WithBody(transactions []*Transaction, extraData []byte) *Block { return block } +// WithWithdrawals sets the withdrawal contents of a block, does not return a new block. 
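A hedged construction example for the new block constructor above (header, txs, receipts and addr are assumed to exist; the trie.Trie hasher mirrors what the genesis code in this patch already passes to NewBlock):

withdrawals := []*types.Withdrawal{
	{Index: 0, Validator: 7, Address: addr, Amount: 1_000_000}, // Amount is denominated in Gwei
}
block := types.NewBlockWithWithdrawals(header, txs, receipts, withdrawals, new(trie.Trie))
_ = block

Passing a nil slice leaves WithdrawalsHash unset (a pre-fork block), while an empty non-nil slice commits to the empty root, as the constructor distinguishes above.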
+func (b *Block) WithWithdrawals(withdrawals []*Withdrawal) *Block { + if withdrawals != nil { + b.withdrawals = make([]*Withdrawal, len(withdrawals)) + copy(b.withdrawals, withdrawals) + } + return b +} + // Hash returns the keccak256 hash of b's header. // The hash is computed on the first call and cached thereafter. func (b *Block) Hash() common.Hash { diff --git a/core/types/gen_withdrawal_json.go b/core/types/gen_withdrawal_json.go new file mode 100644 index 0000000000..7137966435 --- /dev/null +++ b/core/types/gen_withdrawal_json.go @@ -0,0 +1,55 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package types + +import ( + "encoding/json" + + "github.com/PlatONnetwork/PlatON-Go/common" + "github.com/PlatONnetwork/PlatON-Go/common/hexutil" +) + +var _ = (*withdrawalMarshaling)(nil) + +// MarshalJSON marshals as JSON. +func (w Withdrawal) MarshalJSON() ([]byte, error) { + type Withdrawal struct { + Index hexutil.Uint64 `json:"index"` + Validator hexutil.Uint64 `json:"validatorIndex"` + Address common.Address `json:"address"` + Amount hexutil.Uint64 `json:"amount"` + } + var enc Withdrawal + enc.Index = hexutil.Uint64(w.Index) + enc.Validator = hexutil.Uint64(w.Validator) + enc.Address = w.Address + enc.Amount = hexutil.Uint64(w.Amount) + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. +func (w *Withdrawal) UnmarshalJSON(input []byte) error { + type Withdrawal struct { + Index *hexutil.Uint64 `json:"index"` + Validator *hexutil.Uint64 `json:"validatorIndex"` + Address *common.Address `json:"address"` + Amount *hexutil.Uint64 `json:"amount"` + } + var dec Withdrawal + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.Index != nil { + w.Index = uint64(*dec.Index) + } + if dec.Validator != nil { + w.Validator = uint64(*dec.Validator) + } + if dec.Address != nil { + w.Address = *dec.Address + } + if dec.Amount != nil { + w.Amount = uint64(*dec.Amount) + } + return nil +} diff --git a/core/types/gen_withdrawal_rlp.go b/core/types/gen_withdrawal_rlp.go new file mode 100644 index 0000000000..69ef39a975 --- /dev/null +++ b/core/types/gen_withdrawal_rlp.go @@ -0,0 +1,23 @@ +// Code generated by rlpgen. DO NOT EDIT. + +//go:build !norlpgen +// +build !norlpgen + +package types + +import ( + "io" + + "github.com/PlatONnetwork/PlatON-Go/rlp" +) + +func (obj *Withdrawal) EncodeRLP(_w io.Writer) error { + w := rlp.NewEncoderBuffer(_w) + _tmp0 := w.List() + w.WriteUint64(obj.Index) + w.WriteUint64(obj.Validator) + w.WriteBytes(obj.Address[:]) + w.WriteUint64(obj.Amount) + w.ListEnd(_tmp0) + return w.Flush() +} diff --git a/core/types/withdrawal.go b/core/types/withdrawal.go new file mode 100644 index 0000000000..0f02cd99b6 --- /dev/null +++ b/core/types/withdrawal.go @@ -0,0 +1,56 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. 
If not, see <http://www.gnu.org/licenses/>. + +package types + +import ( + "bytes" + + "github.com/PlatONnetwork/PlatON-Go/common" + "github.com/PlatONnetwork/PlatON-Go/common/hexutil" + "github.com/PlatONnetwork/PlatON-Go/rlp" +) + +//go:generate go run github.com/fjl/gencodec -type Withdrawal -field-override withdrawalMarshaling -out gen_withdrawal_json.go +//go:generate go run ../../rlp/rlpgen -type Withdrawal -out gen_withdrawal_rlp.go + +// Withdrawal represents a validator withdrawal from the consensus layer. +type Withdrawal struct { + Index uint64 `json:"index"` // monotonically increasing identifier issued by consensus layer + Validator uint64 `json:"validatorIndex"` // index of validator associated with withdrawal + Address common.Address `json:"address"` // target address for withdrawn ether + Amount uint64 `json:"amount"` // value of withdrawal in Gwei +} + +// field type overrides for gencodec +type withdrawalMarshaling struct { + Index hexutil.Uint64 + Validator hexutil.Uint64 + Amount hexutil.Uint64 +} + +// Withdrawals implements DerivableList for withdrawals. +type Withdrawals []*Withdrawal + +// Len returns the length of s. +func (s Withdrawals) Len() int { return len(s) } + +// EncodeIndex encodes the i'th withdrawal to w. Note that this does not check for errors +// because we assume that *Withdrawal will only ever contain valid withdrawals that were either +// constructed by decoding or via public API in this package. +func (s Withdrawals) EncodeIndex(i int, w *bytes.Buffer) { + rlp.Encode(w, s[i]) +} diff --git a/core/vm/evm.go b/core/vm/evm.go index 2089c7c167..5693bab8b0 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -165,7 +165,7 @@ type BlockContext struct { Coinbase common.Address // Provides information for COINBASE GasLimit uint64 // Provides information for GASLIMIT BlockNumber *big.Int // Provides information for NUMBER - Time *big.Int // Provides information for TIME + Time uint64 // Provides information for TIME Difficulty *big.Int // Provides information for DIFFICULTY (This one must not be deleted, otherwise the solidity contract will be failed) Nonce types.BlockNonce BaseFee *big.Int // Provides information for BASEFEE @@ -238,7 +238,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, snapshotDB snapshotdb.DB, st chainRules: chainConfig.Rules(blockCtx.BlockNumber), interpreters: make([]Interpreter, 0, 1), } - evm.interpreters = append(evm.interpreters, NewEVMInterpreter(evm, config)) + evm.interpreters = append(evm.interpreters, NewEVMInterpreter(evm)) evm.interpreters = append(evm.interpreters, NewWASMInterpreter(evm, config)) evm.interpreter = evm.interpreters[0] @@ -285,6 +285,13 @@ func (evm *EVM) Interpreter() Interpreter { return evm.interpreter } +// SetBlockContext updates the block context of the EVM. +func (evm *EVM) SetBlockContext(blockCtx BlockContext) { + evm.Context = blockCtx + num := blockCtx.BlockNumber + evm.chainRules = evm.chainConfig.Rules(num) +} + // Call executes the contract associated with the addr with the given input as // parameters. It also handles any necessary value transfer required and takes // the necessary steps to create accounts and reverses the state in case of an @@ -434,7 +441,11 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by // Invoke tracer hooks that signal entering/exiting a call frame if evm.Config.Debug { - evm.Config.Tracer.CaptureEnter(DELEGATECALL, caller.Address(), addr, input, gas, nil) + // NOTE: caller must, at all times be a contract. 
It should never happen + // that caller is something other than a Contract. + parent := caller.(*Contract) + // DELEGATECALL inherits value from parent call + evm.Config.Tracer.CaptureEnter(DELEGATECALL, caller.Address(), addr, input, gas, parent.value) defer func(startGas uint64) { evm.Config.Tracer.CaptureExit(ret, startGas-gas, err) }(gas) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index e90ff31092..5b40d39732 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -468,8 +468,7 @@ func opCoinbase(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ } func opTimestamp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - v, _ := uint256.FromBig(interpreter.evm.Context.Time) - scope.Stack.push(v) + scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.Time)) return nil, nil } @@ -863,9 +862,9 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance) interpreter.evm.StateDB.Suicide(scope.Contract.Address()) - if interpreter.cfg.Debug { - interpreter.cfg.Tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) - interpreter.cfg.Tracer.CaptureExit([]byte{}, 0, nil) + if interpreter.evm.Config.Debug { + interpreter.evm.Config.Tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) + interpreter.evm.Config.Tracer.CaptureExit([]byte{}, 0, nil) } return nil, errStopToken } diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index ffc8811170..3877ce5f69 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -205,7 +205,7 @@ func TestAddMod(t *testing.T) { var ( env = NewEVM(BlockContext{}, TxContext{}, nil, nil, params.TestChainConfig, Config{}) stack = newstack() - evmInterpreter = NewEVMInterpreter(env, env.Config) + evmInterpreter = NewEVMInterpreter(env) pc = uint64(0) ) tests := []struct { @@ -295,7 +295,7 @@ func opBenchmark(bench *testing.B, op executionFunc, args ...string) { env = NewEVM(BlockContext{}, TxContext{}, nil, nil, params.TestChainConfig, Config{}) stack, rstack = newstack(), newReturnStack() scope = &ScopeContext{nil, stack, rstack, nil} - evmInterpreter = NewEVMInterpreter(env, env.Config) + evmInterpreter = NewEVMInterpreter(env) ) env.interpreter = evmInterpreter @@ -536,7 +536,7 @@ func TestOpMstore(t *testing.T) { env = NewEVM(BlockContext{}, TxContext{}, nil, nil, params.TestChainConfig, Config{}) stack, rstack = newstack(), newReturnStack() mem = NewMemory() - evmInterpreter = NewEVMInterpreter(env, env.Config) + evmInterpreter = NewEVMInterpreter(env) ) env.interpreter = evmInterpreter @@ -562,7 +562,7 @@ func BenchmarkOpMstore(bench *testing.B) { env = NewEVM(BlockContext{}, TxContext{}, nil, nil, params.TestChainConfig, Config{}) stack, rstack = newstack(), newReturnStack() mem = NewMemory() - evmInterpreter = NewEVMInterpreter(env, env.Config) + evmInterpreter = NewEVMInterpreter(env) ) env.interpreter = evmInterpreter @@ -584,7 +584,7 @@ func BenchmarkOpKeccak256(bench *testing.B) { env = NewEVM(BlockContext{}, TxContext{}, nil, nil, params.TestChainConfig, Config{}) stack, rstack = newstack(), newReturnStack() mem = NewMemory() - evmInterpreter = NewEVMInterpreter(env, env.Config) + evmInterpreter = NewEVMInterpreter(env) ) env.interpreter = evmInterpreter mem.Resize(32) 
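Usage sketch (illustrative, not part of the patch): with the EVM changes above, vm.BlockContext.Time is a plain uint64 and NewEVMInterpreter now takes only the EVM, reading Debug, Tracer and ExtraEips from evm.Config. The snippet below mirrors the updated unit tests in instructions_test.go; the timestamp value is made up for illustration.

package main

import (
	"github.com/PlatONnetwork/PlatON-Go/core/vm"
	"github.com/PlatONnetwork/PlatON-Go/params"
)

func main() {
	// Time is now a plain uint64 UNIX timestamp rather than a *big.Int.
	blockCtx := vm.BlockContext{Time: 1_700_000_000}

	// Construction mirrors the updated tests: a nil snapshot DB and nil StateDB
	// are accepted, and a nil StateDB selects the Shanghai jump table.
	evm := vm.NewEVM(blockCtx, vm.TxContext{}, nil, nil, params.TestChainConfig, vm.Config{})

	// The interpreter no longer takes a separate Config; Debug, Tracer and
	// ExtraEips are read from evm.Config instead.
	_ = vm.NewEVMInterpreter(evm)
}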
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index c698d704e9..569e413ce8 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -20,6 +20,7 @@ import ( "hash" "github.com/PlatONnetwork/PlatON-Go/common/math" + "github.com/PlatONnetwork/PlatON-Go/log" "github.com/PlatONnetwork/PlatON-Go/x/gov" "github.com/PlatONnetwork/PlatON-Go/common" @@ -31,10 +32,6 @@ type Config struct { Tracer EVMLogger // Opcode logger NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls) - // JumpTable contains the EVM instruction table. This - // may be left uninitialised and will be set to the default table. - JumpTable JumpTable - ExtraEips []int // Additional EIPS that are to be enabled ConsoleOutput bool @@ -88,8 +85,8 @@ type keccakState interface { // EVMInterpreter represents an EVM interpreter type EVMInterpreter struct { - evm *EVM - cfg Config + evm *EVM + table *JumpTable hasher keccakState // Keccak256 hasher instance shared across opcodes hasherBuf common.Hash // Keccak256 hasher result array shared aross opcodes @@ -99,24 +96,31 @@ type EVMInterpreter struct { } // NewEVMInterpreter returns a new instance of the Interpreter. -func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { - // We use the STOP instruction whether to see - // the jump table was initialised. If it was not - // we'll set the default jump table. - if cfg.JumpTable[STOP] == nil { - if evm.StateDB == nil { - cfg.JumpTable = shanghaiInstructionSet - } else if evm.chainRules.IsDirac || gov.Gte160VersionState(evm.StateDB) { - cfg.JumpTable = shanghaiInstructionSet - } else { - cfg.JumpTable = londonInstructionSet - } +func NewEVMInterpreter(evm *EVM) *EVMInterpreter { + var table *JumpTable + if evm.StateDB == nil { + table = &shanghaiInstructionSet + } else if evm.chainRules.IsDirac || gov.Gte160VersionState(evm.StateDB) { + table = &shanghaiInstructionSet + } else { + table = &londonInstructionSet } - return &EVMInterpreter{ - evm: evm, - cfg: cfg, + var extraEips []int + if len(evm.Config.ExtraEips) > 0 { + // Deep-copy jumptable to prevent modification of opcodes in other tables + table = copyJumpTable(table) } + for _, eip := range evm.Config.ExtraEips { + if err := EnableEIP(eip, table); err != nil { + // Disable it, so caller can check if it's activated or not + log.Error("EIP activation failed", "eip", eip, "error", err) + } else { + extraEips = append(extraEips, eip) + } + } + evm.Config.ExtraEips = extraEips + return &EVMInterpreter{evm: evm, table: table} } // Run loops and evaluates the contract's code with the given input data and returns @@ -168,22 +172,21 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( logged bool // deferred EVMLogger should ignore already logged steps res []byte // result of the opcode execution function ) - // Don't move this deferrred function, it's placed before the capturestate-deferred method, + // Don't move this deferred function, it's placed before the capturestate-deferred method, // so that it get's executed _after_: the capturestate needs the stacks before // they are returned to the pools defer func() { returnStack(stack) - returnRStack(returns) }() contract.Input = input - if in.cfg.Debug { + if in.evm.Config.Debug { defer func() { if err != nil { if !logged { - in.cfg.Tracer.CaptureState(pcCopy, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) + in.evm.Config.Tracer.CaptureState(pcCopy, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) } else { - 
in.cfg.Tracer.CaptureFault(pcCopy, op, gasCopy, cost, callContext, in.evm.depth, err) + in.evm.Config.Tracer.CaptureFault(pcCopy, op, gasCopy, cost, callContext, in.evm.depth, err) } } }() @@ -193,15 +196,14 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( // the execution of one of the operations or until the done flag is set by the // parent context. for { - if in.cfg.Debug { + if in.evm.Config.Debug { // Capture pre-execution values for tracing. logged, pcCopy, gasCopy = false, pc, contract.Gas } - // Get the operation from the jump table and validate the stack to ensure there are // enough stack items available to perform the operation. op = contract.GetOp(pc) - operation := in.cfg.JumpTable[op] + operation := in.table[op] cost = operation.constantGas // For tracing // Validate stack if sLen := stack.len(); sLen < operation.minStack { @@ -239,15 +241,15 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( return nil, ErrOutOfGas } // Do tracing before memory expansion - if in.cfg.Debug { - in.cfg.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) + if in.evm.Config.Debug { + in.evm.Config.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) logged = true } if memorySize > 0 { mem.Resize(memorySize) } - } else if in.cfg.Debug { - in.cfg.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) + } else if in.evm.Config.Debug { + in.evm.Config.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) logged = true } // execute the operation diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index be335aba5c..bd74cce969 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -111,8 +111,8 @@ func newBerlinInstructionSet() JumpTable { return validate(instructionSet) } -// newIstanbulInstructionSet returns the frontier, homestead -// byzantium, contantinople and petersburg instructions. +// newIstanbulInstructionSet returns the frontier, homestead, byzantium, +// contantinople, istanbul and petersburg instructions. func newIstanbulInstructionSet() JumpTable { instructionSet := newConstantinopleInstructionSet() @@ -124,10 +124,9 @@ func newIstanbulInstructionSet() JumpTable { return validate(instructionSet) } -// newConstantinopleInstructionSet returns the frontier, homestead +// newConstantinopleInstructionSet returns the frontier, homestead, // byzantium and contantinople instructions. func newConstantinopleInstructionSet() JumpTable { - // instructions that can be executed during the byzantium phase. instructionSet := newByzantiumInstructionSet() instructionSet[SHL] = &operation{ execute: opSHL, @@ -164,7 +163,7 @@ func newConstantinopleInstructionSet() JumpTable { return validate(instructionSet) } -// NewByzantiumInstructionSet returns the frontier, homestead and +// newByzantiumInstructionSet returns the frontier, homestead and // byzantium instructions. func newByzantiumInstructionSet() JumpTable { instructionSet := newSpuriousDragonInstructionSet() @@ -235,7 +234,7 @@ func newHomesteadInstructionSet() JumpTable { return validate(instructionSet) } -// NewFrontierInstructionSet returns the frontier instructions +// newFrontierInstructionSet returns the frontier instructions // that can be executed during the frontier phase. 
func newFrontierInstructionSet() JumpTable { tbl := JumpTable{ @@ -1050,3 +1049,14 @@ func newFrontierInstructionSet() JumpTable { return validate(tbl) } + +func copyJumpTable(source *JumpTable) *JumpTable { + dest := *source + for i, op := range source { + if op != nil { + opCopy := *op + dest[i] = &opCopy + } + } + return &dest +} diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go index fb533326cb..92fa0fbf58 100644 --- a/core/vm/opcodes.go +++ b/core/vm/opcodes.go @@ -25,11 +25,7 @@ type OpCode byte // IsPush specifies if an opcode is a PUSH opcode. func (op OpCode) IsPush() bool { - switch op { - case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32: - return true - } - return false + return PUSH0 <= op && op <= PUSH32 } // IsStaticJump specifies if an opcode is JUMP. diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index 70aaf10c5e..defb549516 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -20,7 +20,6 @@ import ( "context" "math" "math/big" - "time" "github.com/PlatONnetwork/PlatON-Go/common" "github.com/PlatONnetwork/PlatON-Go/core/rawdb" @@ -39,7 +38,7 @@ type Config struct { Origin common.Address Coinbase common.Address BlockNumber *big.Int - Time *big.Int + Time uint64 GasLimit uint64 GasPrice *big.Int Value *big.Int @@ -64,9 +63,6 @@ func setDefaults(cfg *Config) { if cfg.Difficulty == nil { cfg.Difficulty = new(big.Int) } - if cfg.Time == nil { - cfg.Time = big.NewInt(time.Now().Unix()) - } if cfg.GasLimit == 0 { cfg.GasLimit = math.MaxUint64 } diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index bed0ba79d8..e6e9fbb2c6 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -50,9 +50,6 @@ func TestDefaults(t *testing.T) { t.Error("expected difficulty to be non nil") } - if cfg.Time == nil { - t.Error("expected time to be non nil") - } if cfg.GasLimit == 0 { t.Error("didn't expect gaslimit to be zero") } @@ -176,7 +173,7 @@ func benchmarkEVM_Create(bench *testing.B, code string) { State: statedb, GasLimit: 10000000, Difficulty: big.NewInt(0x200000), - Time: new(big.Int).SetUint64(0), + Time: 0, Coinbase: common.Address{}, BlockNumber: new(big.Int).SetUint64(1), ChainConfig: ¶ms.ChainConfig{ diff --git a/core/vm/testdata/precompiles/bn256ScalarMul.json b/core/vm/testdata/precompiles/bn256ScalarMul.json index 2a28f6304b..b0427fcc05 100644 --- a/core/vm/testdata/precompiles/bn256ScalarMul.json +++ b/core/vm/testdata/precompiles/bn256ScalarMul.json @@ -124,5 +124,12 @@ "Name": "cdetrio15", "Gas": 6000, "NoBenchmark": true + }, + { + "Input": "039730ea8dff1254c0fee9c0ea777d29a9c710b7e616683f194f18c43b43b869073a5ffcc6fc7a28c30723d6e58ce577356982d65b833a5a5c15bf9024b43d980000000000000000000000000000000000000000000000000000000000000000", + "Expected": "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "Name": "zeroScalar", + "Gas": 6000, + "NoBenchmark": true } ] \ No newline at end of file diff --git a/core/vm/wagon_runtime.go b/core/vm/wagon_runtime.go index 4adcdb7a15..3fce99da59 100644 --- a/core/vm/wagon_runtime.go +++ b/core/vm/wagon_runtime.go @@ -875,7 +875,7 @@ func Gas(proc *exec.Process) uint64 { func Timestamp(proc *exec.Process) int64 { ctx := proc.HostCtx().(*VMContext) checkGas(ctx, 
GasQuickStep) - return ctx.evm.Context.Time.Int64() + return int64(ctx.evm.Context.Time) } func Coinbase(proc *exec.Process, dst uint32) { diff --git a/crypto/secp256k1/libsecp256k1/src/asm/field_10x26_arm.s b/crypto/secp256k1/libsecp256k1/src/asm/field_10x26_arm.s index 1e2d7ff961..5a9cc3ffcf 100644 --- a/crypto/secp256k1/libsecp256k1/src/asm/field_10x26_arm.s +++ b/crypto/secp256k1/libsecp256k1/src/asm/field_10x26_arm.s @@ -11,7 +11,7 @@ Note: - To avoid unnecessary loads and make use of available registers, two 'passes' have every time been interleaved, with the odd passes accumulating c' and d' - which will be added to c and d respectively in the the even passes + which will be added to c and d respectively in the even passes */ diff --git a/eth/api_backend.go b/eth/api_backend.go index 970cd1f08d..0ae4c308a3 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -119,6 +119,17 @@ func (b *EthAPIBackend) BlockByHash(ctx context.Context, hash common.Hash) (*typ return b.eth.blockchain.GetBlockByHash(hash), nil } +// GetBody returns body of a block. It does not resolve special block numbers. +func (b *EthAPIBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { + if number < 0 || hash == (common.Hash{}) { + return nil, errors.New("invalid arguments; expect hash and no special block numbers") + } + if body := b.eth.blockchain.GetBody(hash); body != nil { + return body, nil + } + return nil, errors.New("block body not found") +} + func (b *EthAPIBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) { if blockNr, ok := blockNrOrHash.Number(); ok { return b.BlockByNumber(ctx, blockNr) diff --git a/eth/backend.go b/eth/backend.go index a218757e76..e3c598be0b 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -648,6 +648,7 @@ func (s *Ethereum) Start() error { func (s *Ethereum) Stop() error { s.ethDialCandidates.Close() s.snapDialCandidates.Close() + s.p2pServer.CloseConsensusDial() s.handler.Stop() // Then stop everything else. diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 2715db44df..7331d69645 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -20,6 +20,7 @@ package downloader import ( "errors" "fmt" + "github.com/PlatONnetwork/PlatON-Go/trie" "math/big" "sync" "sync/atomic" @@ -202,6 +203,10 @@ type BlockChain interface { // Snapshots returns the blockchain snapshot tree to paused it during sync. Snapshots() *snapshot.Tree + + // TrieDB retrieves the low level trie database used for interacting + // with trie nodes. + TrieDB() *trie.Database } // New creates a new downloader to fetch hashes and blocks from remote peers. 
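Usage sketch (illustrative, not part of the patch): the core/types changes earlier in this diff add withdrawals to blocks, with header.WithdrawalsHash committing to the list (a nil list leaves the hash nil, an empty list commits to EmptyRootHash), and the downloader changes further below validate delivered bodies against that hash. The minimal example below shows how the pieces fit together, reusing the StackTrie hasher that the downloader tests already use; the field values are made up.

package main

import (
	"fmt"
	"math/big"

	"github.com/PlatONnetwork/PlatON-Go/common"
	"github.com/PlatONnetwork/PlatON-Go/core/types"
	"github.com/PlatONnetwork/PlatON-Go/trie"
)

func main() {
	ws := types.Withdrawals{
		{Index: 1, Validator: 42, Address: common.HexToAddress("0x01"), Amount: 1_000_000}, // Amount is in Gwei
	}
	// Header fields other than the derived hashes are irrelevant for this sketch.
	block := types.NewBlockWithWithdrawals(&types.Header{Number: big.NewInt(1)}, nil, nil, ws, trie.NewStackTrie(nil))

	// The header commitment equals DeriveSha over the withdrawal list, which is
	// what queue.DeliverBodies checks delivered bodies against.
	root := types.DeriveSha(ws, trie.NewStackTrie(nil))
	fmt.Println(*block.Header().WithdrawalsHash == root) // true
}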
@@ -222,7 +227,7 @@ func New(stateDb ethdb.Database, snapshotDB snapshotdb.DB, mux *event.TypeMux, c pposInfoCh: make(chan dataPack, 1), originAndPivotCh: make(chan dataPack, 1), quitCh: make(chan struct{}), - SnapSyncer: snap.NewSyncer(stateDb), + SnapSyncer: snap.NewSyncer(stateDb, chain.TrieDB().Scheme()), stateSyncStart: make(chan *stateSync), snapshotDB: snapshotDB, } @@ -1395,7 +1400,7 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error { ) blocks := make([]*types.Block, len(results)) for i, result := range results { - blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.ExtraData) + blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.ExtraData).WithWithdrawals(result.Withdrawals) } // Downloaded blocks are always regarded as trusted after the // transition. Because the downloaded chain is guided by the @@ -1610,7 +1615,7 @@ func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *state blocks := make([]*types.Block, len(results)) receipts := make([]types.Receipts, len(results)) for i, result := range results { - blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.ExtraData) + blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.ExtraData).WithWithdrawals(result.Withdrawals) receipts[i] = result.Receipts } if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil { @@ -1621,7 +1626,7 @@ func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *state } func (d *Downloader) commitPivotBlock(result *fetchResult) error { - block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.ExtraData) + block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.ExtraData).WithWithdrawals(result.Withdrawals) log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash()) diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index ffdc1882d8..21a88b805e 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -280,7 +280,8 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et rlp.DecodeBytes(blob, bodies[i]) } var ( - txsHashes = make([]common.Hash, len(bodies)) + txsHashes = make([]common.Hash, len(bodies)) + withdrawalHashes = make([]common.Hash, len(bodies)) ) hasher := trie.NewStackTrie(nil) for i, body := range bodies { @@ -292,7 +293,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et res := ð.Response{ Req: req, Res: (*eth.BlockBodiesPacket)(&bodies), - Meta: [][]common.Hash{txsHashes}, + Meta: [][]common.Hash{txsHashes, withdrawalHashes}, Time: 1, Done: make(chan error, 1), // Ignore the returned status } diff --git a/eth/downloader/fetchers_concurrent.go b/eth/downloader/fetchers_concurrent.go index 55abb2a706..b661e0a291 100644 --- a/eth/downloader/fetchers_concurrent.go +++ b/eth/downloader/fetchers_concurrent.go @@ -91,8 +91,8 @@ func (d *Downloader) concurrentFetch(queue typedQueue) error { } }() ordering := make(map[*eth.Request]int) - timeouts := prque.New(func(data interface{}, index int) { - ordering[data.(*eth.Request)] = index + timeouts := prque.New[int64, *eth.Request](func(data *eth.Request, index int) { + ordering[data] = index }) timeout := time.NewTimer(0) @@ -268,14 +268,12 @@ func (d *Downloader) concurrentFetch(queue 
typedQueue) error { // below is purely for to catch programming errors, given the correct // code, there's no possible order of events that should result in a // timeout firing for a non-existent event. - item, exp := timeouts.Peek() + req, exp := timeouts.Peek() if now, at := time.Now(), time.Unix(0, -exp); now.Before(at) { log.Error("Timeout triggered but not reached", "left", at.Sub(now)) timeout.Reset(at.Sub(now)) continue } - req := item.(*eth.Request) - // Stop tracking the timed out request from a timing perspective, // cancel it, so it's not considered in-flight anymore, but keep // the peer marked busy to prevent assigning a second request and @@ -284,11 +282,12 @@ func (d *Downloader) concurrentFetch(queue typedQueue) error { stales[req.Peer] = req delete(ordering, req) - timeouts.Pop() + timeouts.Pop() // Popping an item will reorder indices in `ordering`, delete after, otherwise will resurrect! if timeouts.Size() > 0 { _, exp := timeouts.Peek() timeout.Reset(time.Until(time.Unix(0, -exp))) } + delete(ordering, req) // New timeout potentially set if there are more requests pending, // reschedule the failed one to a free peer fails := queue.unreserve(req.Peer) diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go index c5ac3b9066..e8c20a7bab 100644 --- a/eth/downloader/fetchers_concurrent_bodies.go +++ b/eth/downloader/fetchers_concurrent_bodies.go @@ -89,10 +89,10 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan // deliver is responsible for taking a generic response packet from the concurrent // fetcher, unpacking the body data and delivering it to the downloader's queue. func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - txs, extra := packet.Res.(*eth.BlockBodiesPacket).Unpack() - hashsets := packet.Meta.([][]common.Hash) // {txs hashes} + txs, extra, withdrawals := packet.Res.(*eth.BlockBodiesPacket).Unpack() + hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes} - accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], extra) + accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], extra, withdrawals, hashsets[1]) switch { case err == nil && len(txs) == 0: peer.log.Trace("Requested bodies delivered") diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 23cdd351c2..6dbfb83901 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -69,6 +69,7 @@ type fetchResult struct { Transactions types.Transactions Receipts types.Receipts ExtraData []byte + Withdrawals types.Withdrawals } func newFetchResult(header *types.Header, fastSync bool) *fetchResult { @@ -118,7 +119,7 @@ type queue struct { // Headers are "special", they download in batches, supported by a skeleton chain headerHead common.Hash // Hash of the last queued header to verify order headerTaskPool map[uint64]*types.Header // Pending header retrieval tasks, mapping starting indexes to skeleton headers - headerTaskQueue *prque.Prque // Priority queue of the skeleton indexes to fetch the filling headers for + headerTaskQueue *prque.Prque[int64, uint64] // Priority queue of the skeleton indexes to fetch the filling headers for headerPeerMiss map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable headerPendPool map[string]*fetchRequest // Currently pending header retrieval operations headerResults []*types.Header // Result cache accumulating the completed headers @@ -128,15 +129,15 @@ 
type queue struct { headerContCh chan bool // Channel to notify when header download finishes // All data retrievals below are based on an already assembles header chain - blockTaskPool map[common.Hash]*types.Header // Pending block (body) retrieval tasks, mapping hashes to headers - blockTaskQueue *prque.Prque // Priority queue of the headers to fetch the blocks (bodies) for - blockPendPool map[string]*fetchRequest // Currently pending block (body) retrieval operations - blockWakeCh chan bool // Channel to notify the block fetcher of new tasks + blockTaskPool map[common.Hash]*types.Header // Pending block (body) retrieval tasks, mapping hashes to headers + blockTaskQueue *prque.Prque[int64, *types.Header] // Priority queue of the headers to fetch the blocks (bodies) for + blockPendPool map[string]*fetchRequest // Currently pending block (body) retrieval operations + blockWakeCh chan bool // Channel to notify the block fetcher of new tasks - receiptTaskPool map[common.Hash]*types.Header // Pending receipt retrieval tasks, mapping hashes to headers - receiptTaskQueue *prque.Prque // Priority queue of the headers to fetch the receipts for - receiptPendPool map[string]*fetchRequest // Currently pending receipt retrieval operations - receiptWakeCh chan bool // Channel to notify when receipt fetcher of new tasks + receiptTaskPool map[common.Hash]*types.Header // Pending receipt retrieval tasks, mapping hashes to headers + receiptTaskQueue *prque.Prque[int64, *types.Header] // Priority queue of the headers to fetch the receipts for + receiptPendPool map[string]*fetchRequest // Currently pending receipt retrieval operations + receiptWakeCh chan bool // Channel to notify when receipt fetcher of new tasks resultCache *resultStore // Downloaded but not yet delivered fetch results resultSize common.StorageSize // Approximate size of a block (exponential moving average) @@ -155,9 +156,9 @@ func newQueue(blockCacheLimit int, thresholdInitialSize int, decodeExtra decodeE lock := new(sync.RWMutex) q := &queue{ headerContCh: make(chan bool, 1), - blockTaskQueue: prque.New(nil), + blockTaskQueue: prque.New[int64, *types.Header](nil), blockWakeCh: make(chan bool, 1), - receiptTaskQueue: prque.New(nil), + receiptTaskQueue: prque.New[int64, *types.Header](nil), receiptWakeCh: make(chan bool, 1), active: sync.NewCond(lock), lock: lock, @@ -264,7 +265,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) { } // Schedule all the header retrieval tasks for the skeleton assembly q.headerTaskPool = make(map[uint64]*types.Header) - q.headerTaskQueue = prque.New(nil) + q.headerTaskQueue = prque.New[int64, uint64](nil) q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch) q.headerHashes = make([]common.Hash, len(skeleton)*MaxHeaderFetch) @@ -431,12 +432,12 @@ func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest { for send == 0 && !q.headerTaskQueue.Empty() { from, _ := q.headerTaskQueue.Pop() if q.headerPeerMiss[p.id] != nil { - if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok { - skip = append(skip, from.(uint64)) + if _, ok := q.headerPeerMiss[p.id][from]; ok { + skip = append(skip, from) continue } } - send = from.(uint64) + send = from } // Merge all the skipped batches back for _, from := range skip { @@ -488,7 +489,7 @@ func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bo // item - the fetchRequest // progress - 
whether any progress was made // throttle - if the caller should throttle for a while -func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque, +func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque[int64, *types.Header], pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) { // Short circuit if the pool has been depleted, or if the peer's already // downloading something (sanity check not to corrupt state) @@ -506,8 +507,7 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ { // the task queue will pop items in order, so the highest prio block // is also the lowest block number. - h, _ := taskQueue.Peek() - header := h.(*types.Header) + header, _ := taskQueue.Peek() // we can ask the resultcache if this header is within the // "prioritized" segment of blocks. If it is not, we need to throttle @@ -630,12 +630,14 @@ func (q *queue) ExpireReceipts(peer string) int { } // expire is the generic check that moves a specific expired task from a pending -// pool back into a task pool. +// pool back into a task pool. The syntax on the passed taskQueue is a bit weird +// as we would need a generic expire method to handle both types, but that is not +// supported at the moment at least (Go 1.19). // // Note, this method expects the queue lock to be already held. The reason the // lock is not obtained in here is that the parameters already need to access // the queue, so they already need a lock anyway. -func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue *prque.Prque) int { +func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue interface{}) int { // Retrieve the request being expired and log an error if it's non-existnet, // as there's no order of events that should lead to such expirations. req := pendPool[peer] @@ -647,10 +649,10 @@ func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue // Return any non-satisfied requests to the pool if req.From > 0 { - taskQueue.Push(req.From, -int64(req.From)) + taskQueue.(*prque.Prque[int64, uint64]).Push(req.From, -int64(req.From)) } for _, header := range req.Headers { - taskQueue.Push(header, -int64(header.Number.Uint64())) + taskQueue.(*prque.Prque[int64, *types.Header]).Push(header, -int64(header.Number.Uint64())) } return len(req.Headers) } @@ -768,7 +770,8 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, hashes []comm // DeliverBodies injects a block body retrieval response into the results queue. // The method returns the number of blocks bodies accepted from the delivery and // also wakes any threads waiting for data delivery. 
-func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash, extraData [][]byte) (int, error) { +func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash, + extraData [][]byte, withdrawalLists [][]*types.Withdrawal, withdrawalListHashes []common.Hash) (int, error) { q.lock.Lock() defer q.lock.Unlock() @@ -789,12 +792,23 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH //log.Debug("DeliverBodies validate", "deriveSha", deriveSha, "header.TxHash", header.TxHash, "number", header.Number) return errInvalidBody } + if header.WithdrawalsHash == nil { + // discard any withdrawals if we don't have a withdrawal hash set + withdrawalLists[index] = nil + } else if *header.WithdrawalsHash == types.EmptyRootHash && withdrawalLists[index] == nil { + // if the withdrawal hash is the emptyRootHash, + // we expect withdrawals to be [] instead of nil + withdrawalLists[index] = make([]*types.Withdrawal, 0) + } else if withdrawalListHashes[index] != *header.WithdrawalsHash { + return errInvalidBody + } return nil } reconstruct := func(index int, result *fetchResult) { result.Transactions = txLists[index] result.ExtraData = extraData[index] + result.Withdrawals = withdrawalLists[index] result.SetBodyDone() } return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, @@ -828,7 +842,7 @@ func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt, recei // reason this lock is not obtained in here is because the parameters already need // to access the queue, so they already need a lock anyway. func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, - taskQueue *prque.Prque, pendPool map[string]*fetchRequest, + taskQueue *prque.Prque[int64, *types.Header], pendPool map[string]*fetchRequest, reqTimer metrics.Timer, resInMeter metrics.Meter, resDropMeter metrics.Meter, results int, validate func(index int, header *types.Header) error, reconstruct func(index int, result *fetchResult)) (int, error) { diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go index e0f5e05cd4..4e2279d25d 100644 --- a/eth/downloader/queue_test.go +++ b/eth/downloader/queue_test.go @@ -323,7 +323,7 @@ func XTestDelivery(t *testing.T) { txsHashes[i] = types.DeriveSha(types.Transactions(txs), hasher) } time.Sleep(100 * time.Millisecond) - _, err := q.DeliverBodies(peer.id, txset, txsHashes, extraset) + _, err := q.DeliverBodies(peer.id, txset, txsHashes, extraset, nil, nil) if err != nil { fmt.Printf("delivered %d bodies %v\n", len(txset), err) } diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go index f8583b34e9..d4ae83b99e 100644 --- a/eth/downloader/statesync.go +++ b/eth/downloader/statesync.go @@ -121,3 +121,281 @@ func (s *stateSync) Cancel() error { }) return s.Wait() } + +// loop is the main event loop of a state trie sync. It is responsible for the +// assignment of new tasks to peers (including sending it to them) as well as +// for the processing of inbound data. Note, that the loop does not directly +// receive data from peers, rather those are buffered up in the downloader and +// pushed here async. The reason is to decouple processing from data receipt +// and timeouts. 
+func (s *stateSync) loop() (err error) { + // Listen for new peer events to assign tasks to them + newPeer := make(chan *peerConnection, 1024) + peerSub := s.d.peers.SubscribeNewPeers(newPeer) + defer peerSub.Unsubscribe() + defer func() { + cerr := s.commit(true) + if err == nil { + err = cerr + } + }() + + // Keep assigning new tasks until the sync completes or aborts + for s.sched.Pending() > 0 { + if err = s.commit(false); err != nil { + return err + } + s.assignTasks() + // Tasks assigned, wait for something to happen + select { + case <-newPeer: + // New peer arrived, try to assign it download tasks + + case <-s.cancel: + return errCancelStateFetch + + case <-s.d.cancelCh: + return errCanceled + + case req := <-s.deliver: + // Response, disconnect or timeout triggered, drop the peer if stalling + log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut()) + if req.nItems <= 2 && !req.dropped && req.timedOut() { + // 2 items are the minimum requested, if even that times out, we've no use of + // this peer at the moment. + log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id) + if s.d.dropPeer == nil { + // The dropPeer method is nil when `--copydb` is used for a local copy. + // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored + req.peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", req.peer.id) + } else { + s.d.dropPeer(req.peer.id) + + // If this peer was the master peer, abort sync immediately + s.d.cancelLock.RLock() + master := req.peer.id == s.d.cancelPeer + s.d.cancelLock.RUnlock() + + if master { + s.d.cancel() + return errTimeout + } + } + } + // Process all the received blobs and check for stale delivery + delivered, err := s.process(req) + req.peer.SetNodeDataIdle(delivered, req.delivered) + if err != nil { + log.Warn("Node data write error", "err", err) + return err + } + } + } + return nil +} + +func (s *stateSync) commit(force bool) error { + if !force && s.bytesUncommitted < ethdb.IdealBatchSize { + return nil + } + start := time.Now() + b := s.d.stateDB.NewBatch() + if err := s.sched.Commit(b); err != nil { + return err + } + if err := b.Write(); err != nil { + return fmt.Errorf("DB write error: %v", err) + } + s.updateStats(s.numUncommitted, 0, 0, time.Since(start)) + s.numUncommitted = 0 + s.bytesUncommitted = 0 + return nil +} + +// assignTasks attempts to assign new tasks to all idle peers, either from the +// batch currently being retried, or fetching new data from the trie sync itself. 
+func (s *stateSync) assignTasks() { + // Iterate over all idle peers and try to assign them state fetches + peers, _ := s.d.peers.NodeDataIdlePeers() + for _, p := range peers { + // Assign a batch of fetches proportional to the estimated latency/bandwidth + cap := p.NodeDataCapacity(s.d.peers.rates.TargetRoundTrip()) + req := &stateReq{peer: p, timeout: s.d.peers.rates.TargetTimeout()} + + nodes, _, codes := s.fillTasks(cap, req) + + // If the peer was assigned tasks to fetch, send the network request + if len(nodes)+len(codes) > 0 { + req.peer.log.Trace("Requesting batch of state data", "nodes", len(nodes), "codes", len(codes), "root", s.root) + select { + case s.d.trackStateReq <- req: + req.peer.FetchNodeData(append(nodes, codes...)) // Unified retrieval under eth/6x + case <-s.cancel: + case <-s.d.cancelCh: + } + } + } +} + +// fillTasks fills the given request object with a maximum of n state download +// tasks to send to the remote peer. +func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) { + // Refill available tasks from the scheduler. + if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 { + nodes, paths, codes := s.sched.Missing(fill) + for i, hash := range nodes { + s.trieTasks[hash] = &trieTask{ + path: paths[i], + attempts: make(map[string]struct{}), + } + } + for _, hash := range codes { + s.codeTasks[hash] = &codeTask{ + attempts: make(map[string]struct{}), + } + } + } + // Find tasks that haven't been tried with the request's peer. Prefer code + // over trie nodes as those can be written to disk and forgotten about. + nodes = make([]common.Hash, 0, n) + paths = make([]trie.SyncPath, 0, n) + codes = make([]common.Hash, 0, n) + + req.trieTasks = make(map[common.Hash]*trieTask, n) + req.codeTasks = make(map[common.Hash]*codeTask, n) + + for hash, t := range s.codeTasks { + // Stop when we've gathered enough requests + if len(nodes)+len(codes) == n { + break + } + // Skip any requests we've already tried from this peer + if _, ok := t.attempts[req.peer.id]; ok { + continue + } + // Assign the request to this peer + t.attempts[req.peer.id] = struct{}{} + codes = append(codes, hash) + req.codeTasks[hash] = t + delete(s.codeTasks, hash) + } + for hash, t := range s.trieTasks { + // Stop when we've gathered enough requests + if len(nodes)+len(codes) == n { + break + } + // Skip any requests we've already tried from this peer + if _, ok := t.attempts[req.peer.id]; ok { + continue + } + // Assign the request to this peer + t.attempts[req.peer.id] = struct{}{} + + nodes = append(nodes, hash) + paths = append(paths, t.path) + + req.trieTasks[hash] = t + delete(s.trieTasks, hash) + } + req.nItems = uint16(len(nodes) + len(codes)) + return nodes, paths, codes +} + +// process iterates over a batch of delivered state data, injecting each item +// into a running state sync, re-queuing any items that were requested but not +// delivered. Returns whether the peer actually managed to deliver anything of +// value, and any error that occurred. 
+func (s *stateSync) process(req *stateReq) (int, error) { + // Collect processing stats and update progress if valid data was received + duplicate, unexpected, successful := 0, 0, 0 + + defer func(start time.Time) { + if duplicate > 0 || unexpected > 0 { + s.updateStats(0, duplicate, unexpected, time.Since(start)) + } + }(time.Now()) + + // Iterate over all the delivered data and inject one-by-one into the trie + for _, blob := range req.response { + hash, err := s.processNodeData(blob) + switch err { + case nil: + s.numUncommitted++ + s.bytesUncommitted += len(blob) + successful++ + case trie.ErrNotRequested: + unexpected++ + case trie.ErrAlreadyProcessed: + duplicate++ + default: + return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err) + } + // Delete from both queues (one delivery is enough for the syncer) + delete(req.trieTasks, hash) + delete(req.codeTasks, hash) + } + // Put unfulfilled tasks back into the retry queue + npeers := s.d.peers.Len() + for hash, task := range req.trieTasks { + // If the node did deliver something, missing items may be due to a protocol + // limit or a previous timeout + delayed delivery. Both cases should permit + // the node to retry the missing items (to avoid single-peer stalls). + if len(req.response) > 0 || req.timedOut() { + delete(task.attempts, req.peer.id) + } + // If we've requested the node too many times already, it may be a malicious + // sync where nobody has the right data. Abort. + if len(task.attempts) >= npeers { + return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers) + } + // Missing item, place into the retry queue. + s.trieTasks[hash] = task + } + for hash, task := range req.codeTasks { + // If the node did deliver something, missing items may be due to a protocol + // limit or a previous timeout + delayed delivery. Both cases should permit + // the node to retry the missing items (to avoid single-peer stalls). + if len(req.response) > 0 || req.timedOut() { + delete(task.attempts, req.peer.id) + } + // If we've requested the node too many times already, it may be a malicious + // sync where nobody has the right data. Abort. + if len(task.attempts) >= npeers { + return successful, fmt.Errorf("byte code %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers) + } + // Missing item, place into the retry queue. + s.codeTasks[hash] = task + } + return successful, nil +} + +// processNodeData tries to inject a trie node data blob delivered from a remote +// peer into the state trie, returning whether anything useful was written or any +// error occurred. +func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) { + res := trie.SyncResult{Data: blob} + s.keccak.Reset() + s.keccak.Write(blob) + s.keccak.Read(res.Hash[:]) + err := s.sched.Process(res) + return res.Hash, err +} + +// updateStats bumps the various state sync progress counters and displays a log +// message for the user to see. 
+func (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) { + s.d.syncStatsLock.Lock() + defer s.d.syncStatsLock.Unlock() + + s.d.syncStatsState.pending = uint64(s.sched.Pending()) + s.d.syncStatsState.processed += uint64(written) + s.d.syncStatsState.duplicate += uint64(duplicate) + s.d.syncStatsState.unexpected += uint64(unexpected) + + if written > 0 || duplicate > 0 || unexpected > 0 { + log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected) + } + if written > 0 { + rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed) + } +} diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 3b08708c35..275659abf5 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -212,6 +212,9 @@ type Config struct { // Checkpoint is a hardcoded checkpoint which can be nil. Checkpoint *params.TrustedCheckpoint `toml:",omitempty"` + + // OverrideShanghai (TODO: remove after the fork) + OverrideShanghai *uint64 `toml:",omitempty"` } // CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 0e7f3a6833..76adcdb7f3 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -76,6 +76,7 @@ func (c Config) MarshalTOML() (interface{}, error) { RPCTxFeeCap float64 Whitelist map[uint64]common.Hash `toml:"-"` Checkpoint *params.TrustedCheckpoint `toml:",omitempty"` + OverrideShanghai *uint64 `toml:",omitempty"` } var enc Config enc.Genesis = c.Genesis @@ -137,6 +138,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.RPCTxFeeCap = c.RPCTxFeeCap enc.Whitelist = c.Whitelist enc.Checkpoint = c.Checkpoint + enc.OverrideShanghai = c.OverrideShanghai return &enc, nil } @@ -202,6 +204,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { RPCTxFeeCap *float64 Whitelist map[uint64]common.Hash `toml:"-"` Checkpoint *params.TrustedCheckpoint `toml:",omitempty"` + OverrideShanghai *uint64 `toml:",omitempty"` } var dec Config if err := unmarshal(&dec); err != nil { @@ -384,5 +387,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.Checkpoint != nil { c.Checkpoint = dec.Checkpoint } + if dec.OverrideShanghai != nil { + c.OverrideShanghai = dec.OverrideShanghai + } return nil } diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index 8476c12dd5..da09bd9a76 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -173,9 +173,9 @@ type BlockFetcher struct { completing map[common.Hash]*blockAnnounce // Blocks with headers, currently body-completing // Block cache - queue *prque.Prque // Queue containing the import operations (block number sorted) - queues map[string]int // Per peer block counts to prevent memory exhaustion - queued map[common.Hash]*blockOrHeaderInject // Set of already queued blocks (to dedup imports) + queue *prque.Prque[int64, *blockOrHeaderInject] // Queue containing the import operations (block number sorted) + queues map[string]int // Per peer block counts to prevent memory exhaustion + queued map[common.Hash]*blockOrHeaderInject // Set of already queued blocks (to dedup imports) // Callbacks getBlock 
blockRetrievalFn // Retrieves a block from the local chain @@ -208,7 +208,7 @@ func NewBlockFetcher(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, b fetching: make(map[common.Hash]*blockAnnounce), fetched: make(map[common.Hash][]*blockAnnounce), completing: make(map[common.Hash]*blockAnnounce), - queue: prque.New(nil), + queue: prque.New[int64, *blockOrHeaderInject](nil), queues: make(map[string]int), queued: make(map[common.Hash]*blockOrHeaderInject), getBlock: getBlock, @@ -350,7 +350,7 @@ func (f *BlockFetcher) loop() { // Import any queued blocks that could potentially fit height := f.chainHeight() for !f.queue.Empty() { - op := f.queue.PopItem().(*blockOrHeaderInject) + op := f.queue.PopItem() hash := op.hash() if f.queueChangeHook != nil { f.queueChangeHook(hash, false) @@ -528,7 +528,8 @@ func (f *BlockFetcher) loop() { case res := <-resCh: res.Done <- nil - txs, extra := res.Res.(*eth.BlockBodiesPacket).Unpack() + // Ignoring withdrawals here, since the block fetcher is not used post-merge. + txs, extra, _ := res.Res.(*eth.BlockBodiesPacket).Unpack() f.FilterBodies(peer, txs, extra, time.Now()) case <-timeout.C: diff --git a/eth/filters/filter.go b/eth/filters/filter.go index ceb6f478b7..ab656a1a5e 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -104,7 +104,7 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { if header == nil { return nil, errors.New("unknown block") } - return f.blockLogs(ctx, header, false) + return f.blockLogs(ctx, header) } // Short-cut if all we care about is pending logs if f.begin == rpc.PendingBlockNumber.Int64() { @@ -192,7 +192,7 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err if header == nil || err != nil { return logs, err } - found, err := f.blockLogs(ctx, header, true) + found, err := f.checkMatches(ctx, header) if err != nil { return logs, err } @@ -210,11 +210,14 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e var logs []*types.Log for ; f.begin <= int64(end); f.begin++ { + if f.begin%10 == 0 && ctx.Err() != nil { + return logs, ctx.Err() + } header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin)) if header == nil || err != nil { return logs, err } - found, err := f.blockLogs(ctx, header, false) + found, err := f.blockLogs(ctx, header) if err != nil { return logs, err } @@ -224,15 +227,8 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e } // blockLogs returns the logs matching the filter criteria within a single block. -func (f *Filter) blockLogs(ctx context.Context, header *types.Header, skipBloom bool) ([]*types.Log, error) { - // Fast track: no filtering criteria - if len(f.addresses) == 0 && len(f.topics) == 0 { - list, err := f.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64()) - if err != nil { - return nil, err - } - return flatten(list), nil - } else if skipBloom || bloomFilter(header.Bloom, f.addresses, f.topics) { +func (f *Filter) blockLogs(ctx context.Context, header *types.Header) ([]*types.Log, error) { + if bloomFilter(header.Bloom, f.addresses, f.topics) { return f.checkMatches(ctx, header) } return nil, nil @@ -240,30 +236,37 @@ func (f *Filter) blockLogs(ctx context.Context, header *types.Header, skipBloom // checkMatches checks if the receipts belonging to the given header contain any log events that // match the filter criteria. This function is called when the bloom filter signals a potential match. 
+// skipFilter signals all logs of the given block are requested. func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) { - logsList, err := f.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64()) + hash := header.Hash() + // Logs in cache are partially filled with context data + // such as tx index, block hash, etc. + // Notably tx hash is NOT filled in because it needs + // access to block body data. + cached, err := f.sys.cachedLogElem(ctx, hash, header.Number.Uint64()) if err != nil { return nil, err } - - unfiltered := flatten(logsList) - logs := filterLogs(unfiltered, nil, nil, f.addresses, f.topics) - if len(logs) > 0 { - // We have matching logs, check if we need to resolve full logs via the light client - if logs[0].TxHash == (common.Hash{}) { - receipts, err := f.sys.backend.GetReceipts(ctx, header.Hash()) - if err != nil { - return nil, err - } - unfiltered = unfiltered[:0] - for _, receipt := range receipts { - unfiltered = append(unfiltered, receipt.Logs...) - } - logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics) - } + logs := filterLogs(cached.logs, nil, nil, f.addresses, f.topics) + if len(logs) == 0 { + return nil, nil + } + // Most backends will deliver un-derived logs, but check nevertheless. + if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) { return logs, nil } - return nil, nil + + body, err := f.sys.cachedGetBody(ctx, cached, hash, header.Number.Uint64()) + if err != nil { + return nil, err + } + for i, log := range logs { + // Copy log not to modify cache elements + logcopy := *log + logcopy.TxHash = body.Transactions[logcopy.TxIndex].Hash() + logs[i] = &logcopy + } + return logs, nil } // pendingLogs returns the logs matching the filter criteria within the pending block. @@ -353,11 +356,3 @@ func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]commo } return true } - -func flatten(list [][]*types.Log) []*types.Log { - var flat []*types.Log - for _, logs := range list { - flat = append(flat, logs...) - } - return flat -} diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index d7db88619d..7f1af15e7e 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -22,6 +22,7 @@ import ( "context" "fmt" "sync" + "sync/atomic" "time" lru "github.com/hashicorp/golang-lru" @@ -35,6 +36,7 @@ import ( "github.com/PlatONnetwork/PlatON-Go/ethdb" "github.com/PlatONnetwork/PlatON-Go/event" "github.com/PlatONnetwork/PlatON-Go/log" + "github.com/PlatONnetwork/PlatON-Go/params" "github.com/PlatONnetwork/PlatON-Go/rpc" ) @@ -58,10 +60,13 @@ type Backend interface { ChainDb() ethdb.Database HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error) + GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) PendingBlockAndReceipts() (*types.Block, types.Receipts) + CurrentHeader() *types.Header + ChainConfig() *params.ChainConfig SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription @@ -75,30 +80,30 @@ type Backend interface { // FilterSystem holds resources shared by all filters. 
type FilterSystem struct { backend Backend - logsCache *lru.Cache + logsCache *lru.Cache[common.Hash, *logCacheElem] cfg *Config } // NewFilterSystem creates a filter system. func NewFilterSystem(backend Backend, config Config) *FilterSystem { config = config.withDefaults() - - cache, err := lru.New(config.LogCacheSize) - if err != nil { - panic(err) - } return &FilterSystem{ backend: backend, - logsCache: cache, + logsCache: lru.NewCache[common.Hash, *logCacheElem](config.LogCacheSize), cfg: &config, } } -// cachedGetLogs loads block logs from the backend and caches the result. -func (sys *FilterSystem) cachedGetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) { +type logCacheElem struct { + logs []*types.Log + body atomic.Value +} + +// cachedLogElem loads block logs from the backend and caches the result. +func (sys *FilterSystem) cachedLogElem(ctx context.Context, blockHash common.Hash, number uint64) (*logCacheElem, error) { cached, ok := sys.logsCache.Get(blockHash) if ok { - return cached.([][]*types.Log), nil + return cached, nil } logs, err := sys.backend.GetLogs(ctx, blockHash, number) @@ -108,8 +113,35 @@ func (sys *FilterSystem) cachedGetLogs(ctx context.Context, blockHash common.Has if logs == nil { return nil, fmt.Errorf("failed to get logs for block #%d (0x%s)", number, blockHash.TerminalString()) } - sys.logsCache.Add(blockHash, logs) - return logs, nil + // Database logs are un-derived. + // Fill in whatever we can (txHash is inaccessible at this point). + flattened := make([]*types.Log, 0) + var logIdx uint + for i, txLogs := range logs { + for _, log := range txLogs { + log.BlockHash = blockHash + log.BlockNumber = number + log.TxIndex = uint(i) + log.Index = logIdx + logIdx++ + flattened = append(flattened, log) + } + } + elem := &logCacheElem{logs: flattened} + sys.logsCache.Add(blockHash, elem) + return elem, nil +} + +func (sys *FilterSystem) cachedGetBody(ctx context.Context, elem *logCacheElem, hash common.Hash, number uint64) (*types.Body, error) { + if body := elem.body.Load(); body != nil { + return body.(*types.Body), nil + } + body, err := sys.backend.GetBody(ctx, hash, rpc.BlockNumber(number)) + if err != nil { + return nil, err + } + elem.body.Store(body) + return body, nil } // Type determines the kind of filter and is used to put the filter in to @@ -125,12 +157,12 @@ const ( PendingLogsSubscription // MinedAndPendingLogsSubscription queries for logs in mined and pending blocks. 
MinedAndPendingLogsSubscription - // PendingTransactionsSubscription queries tx hashes for pending - // transactions entering the pending state + // PendingTransactionsSubscription queries for pending transactions entering + // the pending state PendingTransactionsSubscription // BlocksSubscription queries hashes for blocks that are imported BlocksSubscription - // LastSubscription keeps track of the last index + // LastIndexSubscription keeps track of the last index LastIndexSubscription ) @@ -438,6 +470,12 @@ func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) if es.lightMode && len(filters[LogsSubscription]) > 0 { es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) { for _, f := range filters[LogsSubscription] { + if f.logsCrit.FromBlock != nil && header.Number.Cmp(f.logsCrit.FromBlock) < 0 { + continue + } + if f.logsCrit.ToBlock != nil && header.Number.Cmp(f.logsCrit.ToBlock) > 0 { + continue + } if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 { f.logs <- matchedLogs } @@ -481,42 +519,39 @@ func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func // filter logs of a single header in light client mode func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log { - if bloomFilter(header.Bloom, addresses, topics) { - // Get the logs of the block - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - logsList, err := es.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64()) - if err != nil { - return nil - } - var unfiltered []*types.Log - for _, logs := range logsList { - for _, log := range logs { - logcopy := *log - logcopy.Removed = remove - unfiltered = append(unfiltered, &logcopy) - } - } - logs := filterLogs(unfiltered, nil, nil, addresses, topics) - if len(logs) > 0 && logs[0].TxHash == (common.Hash{}) { - // We have matching but non-derived logs - receipts, err := es.backend.GetReceipts(ctx, header.Hash()) - if err != nil { - return nil - } - unfiltered = unfiltered[:0] - for _, receipt := range receipts { - for _, log := range receipt.Logs { - logcopy := *log - logcopy.Removed = remove - unfiltered = append(unfiltered, &logcopy) - } - } - logs = filterLogs(unfiltered, nil, nil, addresses, topics) - } + if !bloomFilter(header.Bloom, addresses, topics) { + return nil + } + // Get the logs of the block + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + cached, err := es.sys.cachedLogElem(ctx, header.Hash(), header.Number.Uint64()) + if err != nil { + return nil + } + unfiltered := append([]*types.Log{}, cached.logs...) + for i, log := range unfiltered { + // Don't modify in-cache elements + logcopy := *log + logcopy.Removed = remove + // Swap copy in-place + unfiltered[i] = &logcopy + } + logs := filterLogs(unfiltered, nil, nil, addresses, topics) + // Txhash is already resolved + if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) { return logs } - return nil + // Resolve txhash + body, err := es.sys.cachedGetBody(ctx, cached, header.Hash(), header.Number.Uint64()) + if err != nil { + return nil + } + for _, log := range logs { + // logs are already copied, safe to modify + log.TxHash = body.Transactions[log.TxIndex].Hash() + } + return logs } // eventLoop (un)installs filters and processes mux events. 
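The filter_system.go hunks above replace the raw [][]*types.Log cache with a per-block logCacheElem: logs are cached once in un-derived form, and the block body is fetched lazily and memoised via atomic.Value so TxHash can be filled in on demand without refetching receipts. Below is a minimal, self-contained Go sketch of that pattern; logEntry, blockBody and resolveTxHashes are simplified stand-ins for illustration, not the PlatON-Go types or API.

package main

import (
	"fmt"
	"sync/atomic"
)

// logEntry and blockBody are simplified stand-ins for types.Log and types.Body.
type logEntry struct {
	TxIndex int
	TxHash  string // empty until derived from the block body
}

type blockBody struct {
	TxHashes []string
}

// logCacheElem mirrors the cache entry shape used above: un-derived logs plus a
// lazily fetched body, memoised with atomic.Value so concurrent filters share it.
type logCacheElem struct {
	logs []*logEntry
	body atomic.Value // stores *blockBody once fetched
}

// resolveTxHashes returns copies of the cached logs with TxHash filled in,
// fetching the block body at most once per cache element (a duplicate fetch
// under concurrency is possible but harmless; the last store wins).
func resolveTxHashes(elem *logCacheElem, fetchBody func() *blockBody) []*logEntry {
	b, _ := elem.body.Load().(*blockBody)
	if b == nil {
		b = fetchBody()
		elem.body.Store(b)
	}
	out := make([]*logEntry, len(elem.logs))
	for i, l := range elem.logs {
		cp := *l // copy, never mutate in-cache elements
		cp.TxHash = b.TxHashes[cp.TxIndex]
		out[i] = &cp
	}
	return out
}

func main() {
	elem := &logCacheElem{logs: []*logEntry{{TxIndex: 0}, {TxIndex: 1}}}
	logs := resolveTxHashes(elem, func() *blockBody {
		return &blockBody{TxHashes: []string{"0xaa", "0xbb"}}
	})
	fmt.Println(logs[0].TxHash, logs[1].TxHash) // 0xaa 0xbb
}

The same copy-before-mutate rule is what the lightFilterLogs rewrite relies on: it sets Removed and TxHash on copies of the cached logs rather than on the cached slice itself.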
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index 744d4f0612..f77cff8dc2 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -18,7 +18,9 @@ package filters import ( "context" + "errors" "fmt" + "github.com/PlatONnetwork/PlatON-Go/crypto" "math/big" "math/rand" "reflect" @@ -50,6 +52,14 @@ type testBackend struct { chainFeed event.Feed } +func (b *testBackend) ChainConfig() *params.ChainConfig { + panic("implement me") +} + +func (b *testBackend) CurrentHeader() *types.Header { + panic("implement me") +} + func (b *testBackend) ChainDb() ethdb.Database { return b.db } @@ -81,6 +91,13 @@ func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*type return rawdb.ReadHeader(b.db, hash, *number), nil } +func (b *testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { + if body := rawdb.ReadBody(b.db, hash, uint64(number)); body != nil { + return body, nil + } + return nil, errors.New("block body not found") +} + func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil { return rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig), nil @@ -657,6 +674,143 @@ func TestPendingLogsSubscription(t *testing.T) { } } +func TestLightFilterLogs(t *testing.T) { + t.Parallel() + + var ( + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, true) + signer = types.HomesteadSigner{} + + firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") + secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") + thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") + notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") + firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") + + // posted twice, once as regular logs and once as pending logs. 
+ allLogs = []*types.Log{ + // Block 1 + {Address: firstAddr, Topics: []common.Hash{}, Data: []byte{}, BlockNumber: 2, Index: 0}, + // Block 2 + {Address: firstAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 0}, + {Address: secondAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 1}, + {Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 3, Index: 2}, + // Block 3 + {Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 4, Index: 0}, + } + + testCases = []struct { + crit FilterCriteria + expected []*types.Log + id rpc.ID + }{ + // match all + 0: {FilterCriteria{}, allLogs, ""}, + // match none due to no matching addresses + 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""}, + // match logs based on addresses, ignore topics + 2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""}, + // match logs based on addresses and topics + 3: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""}, + // all logs with block num >= 3 + 4: {FilterCriteria{FromBlock: big.NewInt(3), ToBlock: big.NewInt(5)}, allLogs[1:], ""}, + // all logs + 5: {FilterCriteria{FromBlock: big.NewInt(0), ToBlock: big.NewInt(5)}, allLogs, ""}, + // all logs with 1>= block num <=2 and topic secondTopic + 6: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(3), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""}, + } + + key, _ = crypto.GenerateKey() + addr = crypto.PubkeyToAddress(key.PublicKey) + genesis = (&core.Genesis{Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{ + addr: {Balance: big.NewInt(params.LAT)}, + }, + }).MustCommit(db) + receipts = []*types.Receipt{{ + Logs: []*types.Log{allLogs[0]}, + }, { + Logs: []*types.Log{allLogs[1], allLogs[2], allLogs[3]}, + }, { + Logs: []*types.Log{allLogs[4]}, + }} + ) + + blocks, _ := core.GenerateChain(params.TestChainConfig, genesis, consensus.NewFaker(), db, 4, func(i int, b *core.BlockGen) { + if i == 0 { + return + } + receipts[i-1].Bloom = types.CreateBloom(types.Receipts{receipts[i-1]}) + b.AddUncheckedReceipt(receipts[i-1]) + tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i - 1), To: &common.Address{}, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, key) + b.AddTx(tx) + }) + for i, block := range blocks { + rawdb.WriteBlock(db, block) + rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) + rawdb.WriteHeadBlockHash(db, block.Hash()) + if i > 0 { + rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), []*types.Receipt{receipts[i-1]}) + } + } + // create all filters + for i := range testCases { + id, err := api.NewFilter(testCases[i].crit) + if err != nil { + t.Fatal(err) + } + testCases[i].id = id + } + + // raise events + time.Sleep(1 * time.Second) + for _, block := range blocks { + backend.chainFeed.Send(core.ChainEvent{Block: block, Hash: common.Hash{}, Logs: allLogs}) + } + + for i, tt := range testCases { + var fetched []*types.Log + timeout := time.Now().Add(1 * time.Second) + for { // fetch all expected logs + results, err := api.GetFilterChanges(tt.id) + if err != nil { + t.Fatalf("Unable to fetch logs: %v", err) + } + fetched = append(fetched, results.([]*types.Log)...) 
+ if len(fetched) >= len(tt.expected) { + break + } + // check timeout + if time.Now().After(timeout) { + break + } + + time.Sleep(100 * time.Millisecond) + } + + if len(fetched) != len(tt.expected) { + t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) + return + } + + for l := range fetched { + if fetched[l].Removed { + t.Errorf("expected log not to be removed for log %d in case %d", l, i) + } + expected := *tt.expected[l] + blockNum := expected.BlockNumber - 1 + expected.BlockHash = blocks[blockNum].Hash() + expected.TxHash = blocks[blockNum].Transactions()[0].Hash() + if !reflect.DeepEqual(fetched[l], &expected) { + t.Errorf("invalid log on index %d for case %d", l, i) + } + } + } +} + // TestPendingTxFilterDeadlock tests if the event loop hangs when pending // txes arrive at the same time that one of multiple filters is timing out. // Please refer to #22131 for more details. diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index 241d0a31ac..3fb98d6fc3 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -98,7 +98,7 @@ type Backend interface { // TxPool defines the methods needed by the protocol handler to serve transactions. type TxPool interface { - // Get retrieves the the transaction from the local txpool with the given hash. + // Get retrieves the transaction from the local txpool with the given hash. Get(hash common.Hash) *types.Transaction } @@ -138,7 +138,7 @@ func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2 // NodeInfo represents a short summary of the `eth` sub-protocol metadata // known about the host peer. type NodeInfo struct { - Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4) + Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Rinkeby=4) Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index 36c3855817..6a52631fad 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -43,6 +43,8 @@ var ( testAddr = crypto.PubkeyToAddress(testKey.PublicKey) ) +func u64(val uint64) *uint64 { return &val } + // testBackend is a mock implementation of the live Ethereum message handler. Its // purpose is to allow testing the request/reply workflows and wire serialization // in the `eth` protocol without actually doing any data processing. @@ -55,26 +57,22 @@ type testBackend struct { // newTestBackend creates an empty chain and wraps it into a mock backend. func newTestBackend(blocks int) *testBackend { - return newTestBackendWithGenerator(blocks, nil) + return newTestBackendWithGenerator(blocks, false, nil) } // newTestBackend creates a chain with a number of explicitly defined blocks and // wraps it into a mock backend. 
-func newTestBackendWithGenerator(blocks int, generator func(int, *core.BlockGen)) *testBackend { - // Create a database pre-initialize with a genesis block - db := rawdb.NewMemoryDatabase() +func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int, *core.BlockGen)) *testBackend { + var ( + // Create a database pre-initialize with a genesis block + db = rawdb.NewMemoryDatabase() + config = params.TestChainConfig + ) genesis := (&core.Genesis{ - Config: params.TestChainConfig, + Config: config, Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(100_000_000_000_000_000)}}, }).MustCommit(db) - //chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, consensus.NewFakerWithDataBase(db), vm.Config{}, nil, nil) - /*cache := core.NewBlockChainCache(chain) - bs, _ := core.GenerateChain(params.TestChainConfig, chain.Genesis(), consensus.NewFakerWithDataBase(db), db, blocks, generator) - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - }*/ - chain, _ := core.GenerateBlockChain2(params.TestChainConfig, genesis, consensus.NewFakerWithDataBase(db, genesis), db, blocks, generator) cache := core.NewBlockChainCache(chain) @@ -309,7 +307,17 @@ func TestGetBlockBodies66(t *testing.T) { testGetBlockBodies(t, ETH66) } func testGetBlockBodies(t *testing.T, protocol uint) { t.Parallel() - backend := newTestBackend(maxBodiesServe + 15) + gen := func(n int, g *core.BlockGen) { + if n%2 == 0 { + w := &types.Withdrawal{ + Address: common.Address{0xaa}, + Amount: 42, + } + g.AddWithdrawal(w) + } + } + + backend := newTestBackendWithGenerator(maxBodiesServe+15, true, gen) defer backend.close() peer, _ := newTestPeer("peer", protocol, backend) @@ -359,7 +367,7 @@ func testGetBlockBodies(t *testing.T, protocol uint) { block := backend.chain.GetBlockByNumber(uint64(num)) hashes = append(hashes, block.Hash()) if len(bodies) < tt.expected { - bodies = append(bodies, &BlockBody{Transactions: block.Transactions()}) + bodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Withdrawals: block.Withdrawals()}) } break } @@ -369,7 +377,7 @@ func testGetBlockBodies(t *testing.T, protocol uint) { hashes = append(hashes, hash) if tt.available[j] && len(bodies) < tt.expected { block := backend.chain.GetBlockByHash(hash) - bodies = append(bodies, &BlockBody{Transactions: block.Transactions()}) + bodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Withdrawals: block.Withdrawals()}) } } // Send the hash request and verify the response @@ -381,7 +389,7 @@ func testGetBlockBodies(t *testing.T, protocol uint) { RequestId: 123, BlockBodiesPacket: bodies, }); err != nil { - t.Errorf("test %d: bodies mismatch: %v", i, err) + t.Fatalf("test %d: bodies mismatch: %v", i, err) } } } @@ -426,7 +434,7 @@ func testGetNodeData(t *testing.T, protocol uint) { } } // Assemble the test environment - backend := newTestBackendWithGenerator(4, generator) + backend := newTestBackendWithGenerator(4, false, generator) defer backend.close() peer, _ := newTestPeer("peer", protocol, backend) @@ -531,7 +539,7 @@ func testGetBlockReceipts(t *testing.T, protocol uint) { } } // Assemble the test environment - backend := newTestBackendWithGenerator(4, generator) + backend := newTestBackendWithGenerator(4, false, generator) defer backend.close() peer, _ := newTestPeer("peer", protocol, backend) diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index b17df10451..c20a4497da 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -382,13 
+382,17 @@ func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { } metadata := func() interface{} { var ( - txsHashes = make([]common.Hash, len(res.BlockBodiesPacket)) + txsHashes = make([]common.Hash, len(res.BlockBodiesPacket)) + withdrawalHashes = make([]common.Hash, len(res.BlockBodiesPacket)) ) hasher := trie.NewStackTrie(nil) for i, body := range res.BlockBodiesPacket { txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) + if body.Withdrawals != nil { + withdrawalHashes[i] = types.DeriveSha(types.Withdrawals(body.Withdrawals), hasher) + } } - return [][]common.Hash{txsHashes} + return [][]common.Hash{txsHashes, withdrawalHashes} } return peer.dispatchResponse(&Response{ id: res.RequestId, diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index e037e1504f..816a5f08c6 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -242,19 +242,22 @@ type BlockBodiesRLPPacket66 struct { type BlockBody struct { Transactions []*types.Transaction // Transactions contained within a block ExtraData []byte + Withdrawals []*types.Withdrawal `rlp:"optional"` // Withdrawals contained within a block } // Unpack retrieves the transactions and uncles from the range packet and returns // them in a split flat format that's more consistent with the internal data structures. -func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]byte) { +func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]byte, [][]*types.Withdrawal) { + // TODO(matt): add support for withdrawals to fetchers var ( - txset = make([][]*types.Transaction, len(*p)) - extraData = make([][]byte, len(*p)) + txset = make([][]*types.Transaction, len(*p)) + extraData = make([][]byte, len(*p)) + withdrawalset = make([][]*types.Withdrawal, len(*p)) ) for i, body := range *p { - txset[i], extraData[i] = body.Transactions, body.ExtraData + txset[i], extraData[i], withdrawalset[i] = body.Transactions, body.ExtraData, body.Withdrawals } - return txset, extraData + return txset, extraData, withdrawalset } // GetNodeDataPacket represents a trie node data query. diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index 133a6da280..ba8553ec94 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -418,7 +418,8 @@ type SyncPeer interface { // - The peer delivers a stale response after a previous timeout // - The peer delivers a refusal to serve the requested state type Syncer struct { - db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup) + db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup) + scheme string // Node scheme used in node database root common.Hash // Current state trie root being synced tasks []*accountTask // Current account task set being synced @@ -486,9 +487,10 @@ type Syncer struct { // NewSyncer creates a new snapshot syncer to download the Ethereum state over the // snap protocol. 
-func NewSyncer(db ethdb.KeyValueStore) *Syncer { +func NewSyncer(db ethdb.KeyValueStore, scheme string) *Syncer { return &Syncer{ - db: db, + db: db, + scheme: scheme, peers: make(map[string]SyncPeer), peerJoin: new(event.Feed), @@ -582,7 +584,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error { s.lock.Lock() s.root = root s.healer = &healTask{ - scheduler: state.NewStateSync(root, s.db, s.onHealState), + scheduler: state.NewStateSync(root, s.db, s.onHealState, s.scheme), trieTasks: make(map[string]common.Hash), codeTasks: make(map[common.Hash]struct{}), } @@ -744,8 +746,9 @@ func (s *Syncer) loadSyncStatus() { s.accountBytes += common.StorageSize(len(key) + len(value)) }, } - task.genTrie = trie.NewStackTrie(task.genBatch) - + task.genTrie = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { + rawdb.WriteTrieNode(task.genBatch, owner, path, hash, val, s.scheme) + }) for accountHash, subtasks := range task.SubTasks { for _, subtask := range subtasks { subtask.genBatch = ethdb.HookedBatch{ @@ -754,7 +757,9 @@ func (s *Syncer) loadSyncStatus() { s.storageBytes += common.StorageSize(len(key) + len(value)) }, } - subtask.genTrie = trie.NewStackTrieWithOwner(subtask.genBatch, accountHash) + subtask.genTrie = trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { + rawdb.WriteTrieNode(subtask.genBatch, owner, path, hash, val, s.scheme) + }, accountHash) } } } @@ -811,7 +816,9 @@ func (s *Syncer) loadSyncStatus() { Last: last, SubTasks: make(map[common.Hash][]*storageTask), genBatch: batch, - genTrie: trie.NewStackTrie(batch), + genTrie: trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { + rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme) + }), }) log.Debug("Created account sync task", "from", next, "last", last) next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1)) @@ -1836,7 +1843,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) { } // Check if the account is a contract with an unknown storage trie if account.Root != emptyRoot { - if ok, err := s.db.Has(account.Root[:]); err != nil || !ok { + if !rawdb.HasTrieNode(s.db, res.hashes[i], nil, account.Root, s.scheme) { // If there was a previous large state retrieval in progress, // don't restart it from scratch. This happens if a sync cycle // is interrupted and resumed later. 
However, *do* update the @@ -2008,7 +2015,9 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { Last: r.End(), root: acc.Root, genBatch: batch, - genTrie: trie.NewStackTrieWithOwner(batch, account), + genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { + rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme) + }, account), }) for r.Next() { batch := ethdb.HookedBatch{ @@ -2022,7 +2031,9 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { Last: r.End(), root: acc.Root, genBatch: batch, - genTrie: trie.NewStackTrieWithOwner(batch, account), + genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { + rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme) + }, account), }) } for _, task := range tasks { @@ -2067,7 +2078,9 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { slots += len(res.hashes[i]) if i < len(res.hashes)-1 || res.subTask == nil { - tr := trie.NewStackTrieWithOwner(batch, account) + tr := trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) { + rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme) + }, account) for j := 0; j < len(res.hashes[i]); j++ { tr.Update(res.hashes[i][j][:], res.slots[i][j]) } diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index 3ebb7653fb..0a8a63cb93 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -160,6 +160,13 @@ func newTestPeer(id string, t *testing.T, term func()) *testPeer { return peer } +func (t *testPeer) setStorageTries(tries map[common.Hash]*trie.Trie) { + t.storageTries = make(map[common.Hash]*trie.Trie) + for root, trie := range tries { + t.storageTries[root] = trie.Copy() + } +} + func (t *testPeer) ID() string { return t.id } func (t *testPeer) Log() log.Logger { return t.logger } @@ -564,9 +571,9 @@ func TestSyncBloatedProof(t *testing.T) { }) } ) - sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100) source := newTestPeer("source", t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { @@ -612,15 +619,15 @@ func TestSyncBloatedProof(t *testing.T) { } return nil } - syncer := setupSyncer(source) + syncer := setupSyncer(nodeScheme, source) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil { t.Fatal("No error returned from incomplete/cancelled sync") } } -func setupSyncer(peers ...*testPeer) *Syncer { +func setupSyncer(scheme string, peers ...*testPeer) *Syncer { stateDb := rawdb.NewMemoryDatabase() - syncer := NewSyncer(stateDb) + syncer := NewSyncer(stateDb, scheme) for _, peer := range peers { syncer.Register(peer) peer.remote = syncer @@ -641,15 +648,15 @@ func TestSync(t *testing.T) { }) } ) - sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems return source } - syncer := setupSyncer(mkSource("source")) + syncer := setupSyncer(nodeScheme, mkSource("source")) if err := 
syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } @@ -670,15 +677,15 @@ func TestSyncTinyTriePanic(t *testing.T) { }) } ) - sourceAccountTrie, elems := makeAccountTrieNoStorage(1) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems return source } - syncer := setupSyncer(mkSource("source")) + syncer := setupSyncer(nodeScheme, mkSource("source")) done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) @@ -700,15 +707,15 @@ func TestMultiSync(t *testing.T) { }) } ) - sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems return source } - syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB")) + syncer := setupSyncer(nodeScheme, mkSource("sourceA"), mkSource("sourceB")) done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) @@ -730,17 +737,17 @@ func TestSyncWithStorage(t *testing.T) { }) } ) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems - source.storageTries = storageTries + source.setStorageTries(storageTries) source.storageValues = storageElems return source } - syncer := setupSyncer(mkSource("sourceA")) + syncer := setupSyncer(nodeScheme, mkSource("sourceA")) done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) @@ -762,13 +769,13 @@ func TestMultiSyncManyUseless(t *testing.T) { }) } ) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems - source.storageTries = storageTries + source.setStorageTries(storageTries) source.storageValues = storageElems if !noAccount { @@ -784,6 +791,7 @@ func TestMultiSyncManyUseless(t *testing.T) { } syncer := setupSyncer( + nodeScheme, mkSource("full", true, true, true), mkSource("noAccounts", false, true, true), mkSource("noStorage", true, false, true), @@ -808,13 +816,13 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { }) } ) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, noAccount, noStorage, noTrieNode 
bool) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems - source.storageTries = storageTries + source.setStorageTries(storageTries) source.storageValues = storageElems if !noAccount { @@ -830,6 +838,7 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { } syncer := setupSyncer( + nodeScheme, mkSource("full", true, true, true), mkSource("noAccounts", false, true, true), mkSource("noStorage", true, false, true), @@ -859,13 +868,13 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { }) } ) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems - source.storageTries = storageTries + source.setStorageTries(storageTries) source.storageValues = storageElems if !noAccount { @@ -881,6 +890,7 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { } syncer := setupSyncer( + nodeScheme, mkSource("full", true, true, true), mkSource("noAccounts", false, true, true), mkSource("noStorage", true, false, true), @@ -925,15 +935,16 @@ func TestSyncBoundaryAccountTrie(t *testing.T) { }) } ) - sourceAccountTrie, elems := makeBoundaryAccountTrie(3000) + nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(3000) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems return source } syncer := setupSyncer( + nodeScheme, mkSource("peer-a"), mkSource("peer-b"), ) @@ -959,11 +970,11 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { }) } ) - sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, slow bool) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems if slow { @@ -973,6 +984,7 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { } syncer := setupSyncer( + nodeScheme, mkSource("nice-a", false), mkSource("nice-b", false), mkSource("nice-c", false), @@ -1000,11 +1012,11 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { }) } ) - sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems source.codeRequestHandler = codeFn return source @@ -1014,6 +1026,7 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { // non-corrupt peer, which delivers everything in one go, and makes the // test moot syncer := setupSyncer( + nodeScheme, mkSource("capped", cappedCodeRequestHandler), mkSource("corrupt", corruptCodeRequestHandler), ) @@ -1037,11 +1050,11 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { }) } ) - sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + nodeScheme, 
sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, accFn accountHandlerFunc) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems source.accountRequestHandler = accFn return source @@ -1051,6 +1064,7 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { // non-corrupt peer, which delivers everything in one go, and makes the // test moot syncer := setupSyncer( + nodeScheme, mkSource("capped", defaultAccountRequestHandler), mkSource("corrupt", corruptAccountRequestHandler), ) @@ -1076,11 +1090,11 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { }) } ) - sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems source.codeRequestHandler = codeFn return source @@ -1089,6 +1103,7 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { // so it shouldn't be more than that var counter int syncer := setupSyncer( + nodeScheme, mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { counter++ return cappedCodeRequestHandler(t, id, hashes, max) @@ -1126,17 +1141,18 @@ func TestSyncBoundaryStorageTrie(t *testing.T) { }) } ) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems - source.storageTries = storageTries + source.setStorageTries(storageTries) source.storageValues = storageElems return source } syncer := setupSyncer( + nodeScheme, mkSource("peer-a"), mkSource("peer-b"), ) @@ -1162,13 +1178,13 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { }) } ) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false) mkSource := func(name string, slow bool) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems - source.storageTries = storageTries + source.setStorageTries(storageTries) source.storageValues = storageElems if slow { @@ -1178,6 +1194,7 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { } syncer := setupSyncer( + nodeScheme, mkSource("nice-a", false), mkSource("slow", true), ) @@ -1203,19 +1220,20 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) { }) } ) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems - 
source.storageTries = storageTries + source.setStorageTries(storageTries) source.storageValues = storageElems source.storageRequestHandler = handler return source } syncer := setupSyncer( + nodeScheme, mkSource("nice-a", defaultStorageRequestHandler), mkSource("nice-b", defaultStorageRequestHandler), mkSource("nice-c", defaultStorageRequestHandler), @@ -1241,18 +1259,19 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { }) } ) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems - source.storageTries = storageTries + source.setStorageTries(storageTries) source.storageValues = storageElems source.storageRequestHandler = handler return source } syncer := setupSyncer( + nodeScheme, mkSource("nice-a", defaultStorageRequestHandler), mkSource("nice-b", defaultStorageRequestHandler), mkSource("nice-c", defaultStorageRequestHandler), @@ -1281,18 +1300,18 @@ func TestSyncWithStorageMisbehavingProve(t *testing.T) { }) } ) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems - source.storageTries = storageTries + source.setStorageTries(storageTries) source.storageValues = storageElems source.storageRequestHandler = proofHappyStorageRequestHandler return source } - syncer := setupSyncer(mkSource("sourceA")) + syncer := setupSyncer(nodeScheme, mkSource("sourceA")) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } @@ -1349,7 +1368,7 @@ func getCodeByHash(hash common.Hash) []byte { } // makeAccountTrieNoStorage spits out a trie, along with the leafs -func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) { +func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) { db := trie.NewDatabase(rawdb.NewMemoryDatabase()) accTrie := trie.NewEmpty(db) var entries entrySlice @@ -1368,13 +1387,13 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) { } sort.Sort(entries) accTrie.Commit(nil) - return accTrie, entries + return db.Scheme(), accTrie, entries } // makeBoundaryAccountTrie constructs an account trie. Instead of filling // accounts normally, this function will fill a few accounts which have // boundary hash. -func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) { +func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) { var ( entries entrySlice boundaries []common.Hash @@ -1426,12 +1445,12 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) { } sort.Sort(entries) trie.Commit(nil) - return trie, entries + return db.Scheme(), trie, entries } // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each accounts // has a unique storage set. 
-func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { +func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (string, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { var ( db = trie.NewDatabase(rawdb.NewMemoryDatabase()) accTrie = trie.NewEmpty(db) @@ -1447,7 +1466,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) codehash = getCodeHash(i) } // Create a storage trie - stTrie, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db) + _, stTrie, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db) stRoot := stTrie.Hash() stTrie.Commit(nil) value, _ := rlp.EncodeToBytes(&types.StateAccount{ @@ -1461,17 +1480,17 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) accTrie.Update(elem.k, elem.v) entries = append(entries, elem) - storageTries[common.BytesToHash(key)] = stTrie + storageTries[common.BytesToHash(key)] = stTrie.Copy() storageEntries[common.BytesToHash(key)] = stEntries } sort.Sort(entries) accTrie.Commit(nil) - return accTrie, entries, storageTries, storageEntries + return db.Scheme(), accTrie, entries, storageTries, storageEntries } // makeAccountTrieWithStorage spits out a trie, along with the leafs -func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { +func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (string, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { var ( db = trie.NewDatabase(rawdb.NewMemoryDatabase()) accTrie = trie.NewEmpty(db) @@ -1492,9 +1511,9 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie stEntries entrySlice ) if boundary { - stTrie, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db) + _, stTrie, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db) } else { - stTrie, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db) + _, stTrie, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db) } stRoot := stTrie.Hash() stTrie.Commit(nil) @@ -1510,18 +1529,18 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie accTrie.Update(elem.k, elem.v) entries = append(entries, elem) // we reuse the same one for all accounts - storageTries[common.BytesToHash(key)] = stTrie + storageTries[common.BytesToHash(key)] = stTrie.Copy() storageEntries[common.BytesToHash(key)] = stEntries } sort.Sort(entries) accTrie.Commit(nil) - return accTrie, entries, storageTries, storageEntries + return db.Scheme(), accTrie, entries, storageTries, storageEntries } // makeStorageTrieWithSeed fills a storage trie with n items, returning the // not-yet-committed trie and the sorted entries. The seeds can be used to ensure // that tries are unique. 
-func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) { +func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (string, *trie.Trie, entrySlice) { trie, _ := trie.New(owner, common.Hash{}, db) var entries entrySlice for i := uint64(1); i <= n; i++ { @@ -1538,13 +1557,13 @@ func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Databas } sort.Sort(entries) trie.Commit(nil) - return trie, entries + return db.Scheme(), trie, entries } // makeBoundaryStorageTrie constructs a storage trie. Instead of filling // storage slots normally, this function will fill a few slots which have // boundary hash. -func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (*trie.Trie, entrySlice) { +func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (string, *trie.Trie, entrySlice) { var ( entries entrySlice boundaries []common.Hash @@ -1589,12 +1608,12 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (*trie } sort.Sort(entries) trie.Commit(nil) - return trie, entries + return db.Scheme(), trie, entries } func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) { t.Helper() - triedb := trie.NewDatabase(db) + triedb := trie.NewDatabase(rawdb.NewDatabase(db)) accTrie, err := trie.New(common.Hash{}, root, triedb) if err != nil { t.Fatal(err) @@ -1650,16 +1669,16 @@ func TestSyncAccountPerformance(t *testing.T) { }) } ) - sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) - source.accountTrie = sourceAccountTrie + source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems return source } src := mkSource("source") - syncer := setupSyncer(src) + syncer := setupSyncer(nodeScheme, src) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go index f34ea5b61c..be022ef901 100644 --- a/eth/tracers/internal/tracetest/calltrace_test.go +++ b/eth/tracers/internal/tracetest/calltrace_test.go @@ -21,10 +21,8 @@ import ( "math/big" "os" "path/filepath" - "reflect" "strings" "testing" - "unicode" "github.com/PlatONnetwork/PlatON-Go/common" "github.com/PlatONnetwork/PlatON-Go/common/hexutil" @@ -44,56 +42,6 @@ import ( _ "github.com/PlatONnetwork/PlatON-Go/eth/tracers/native" ) -// To generate a new callTracer test, copy paste the makeTest method below into -// a Geth console and call it with a transaction hash you which to export. - -/* -// makeTest generates a callTracer test by running a prestate reassembled and a -// call trace run, assembling all the gathered information into a test case. 
-var makeTest = function(tx, rewind) { - // Generate the genesis block from the block, transaction and prestate data - var block = eth.getBlock(eth.getTransaction(tx).blockHash); - var genesis = eth.getBlock(block.parentHash); - - delete genesis.gasUsed; - delete genesis.logsBloom; - delete genesis.parentHash; - delete genesis.receiptsRoot; - delete genesis.sha3Uncles; - delete genesis.size; - delete genesis.transactions; - delete genesis.transactionsRoot; - delete genesis.uncles; - - genesis.gasLimit = genesis.gasLimit.toString(); - genesis.number = genesis.number.toString(); - genesis.timestamp = genesis.timestamp.toString(); - - genesis.alloc = debug.traceTransaction(tx, {tracer: "prestateTracer", rewind: rewind}); - for (var key in genesis.alloc) { - genesis.alloc[key].nonce = genesis.alloc[key].nonce.toString(); - } - genesis.config = admin.nodeInfo.protocols.eth.config; - - // Generate the call trace and produce the test input - var result = debug.traceTransaction(tx, {tracer: "callTracer", rewind: rewind}); - delete result.time; - - console.log(JSON.stringify({ - genesis: genesis, - context: { - number: block.number.toString(), - difficulty: block.difficulty, - timestamp: block.timestamp.toString(), - gasLimit: block.gasLimit.toString(), - miner: block.miner, - }, - input: eth.getRawTransaction(tx), - result: result, - }, null, 2)); -} -*/ - type callContext struct { Number math.HexOrDecimal64 `json:"number"` Difficulty *math.HexOrDecimal256 `json:"difficulty"` @@ -102,18 +50,28 @@ type callContext struct { Miner common.Address `json:"miner"` } +// callLog is the result of LOG opCode +type callLog struct { + Address common.Address `json:"address"` + Topics []common.Hash `json:"topics"` + Data hexutil.Bytes `json:"data"` +} + // callTrace is the result of a callTracer run. type callTrace struct { - Type string `json:"type"` - From common.Address `json:"from"` - To common.Address `json:"to"` - Input hexutil.Bytes `json:"input"` - Output hexutil.Bytes `json:"output"` - Gas *hexutil.Uint64 `json:"gas,omitempty"` - GasUsed *hexutil.Uint64 `json:"gasUsed,omitempty"` - Value *hexutil.Big `json:"value,omitempty"` - Error string `json:"error,omitempty"` - Calls []callTrace `json:"calls,omitempty"` + From common.Address `json:"from"` + Gas *hexutil.Uint64 `json:"gas"` + GasUsed *hexutil.Uint64 `json:"gasUsed"` + To common.Address `json:"to,omitempty"` + Input hexutil.Bytes `json:"input"` + Output hexutil.Bytes `json:"output,omitempty"` + Error string `json:"error,omitempty"` + RevertReason string `json:"revertReason,omitempty"` + Calls []callTrace `json:"calls,omitempty"` + Logs []callLog `json:"logs,omitempty"` + Value *hexutil.Big `json:"value,omitempty"` + // Gencodec adds overridden fields at the end + Type string `json:"type"` } // callTracerTest defines a single test to check the call tracer against. 
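Before the next hunk, a note on how the rewritten assertions work: the expected callTrace is marshalled with Go's deterministic encoding/json encoder and compared to the tracer output as a plain string, and only the top call's gasUsed is decoded for the sanity check against the gas reported by the state transition. The sketch below is a dependency-free illustration of that check; the JSON values and the usedGas figure are made up, and plain hex strings stand in for hexutil.Uint64.

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// res stands in for the bytes returned by tracer.GetResult().
	res := []byte(`{"from":"0x01","gas":"0x7148","gasUsed":"0x54d8","type":"CALL"}`)

	// Decode only the field needed for the sanity check.
	var topCall struct {
		GasUsed string `json:"gasUsed"`
	}
	if err := json.Unmarshal(res, &topCall); err != nil {
		panic(err)
	}
	gasUsed, err := strconv.ParseUint(strings.TrimPrefix(topCall.GasUsed, "0x"), 16, 64)
	if err != nil {
		panic(err)
	}

	// usedGas stands in for the UsedGas reported by st.TransitionDb().
	usedGas := uint64(0x54d8)
	if gasUsed != usedGas {
		fmt.Printf("top call has invalid gasUsed: have %d, want %d\n", gasUsed, usedGas)
		return
	}
	fmt.Printf("gasUsed matches execution result: %d\n", gasUsed)

	// Full-trace comparison: marshal the expected struct and compare strings;
	// Go's encoder is deterministic, so no reflect.DeepEqual bounce is needed.
	want, _ := json.Marshal(struct {
		From string `json:"from"`
		Type string `json:"type"`
	}{From: "0x01", Type: "CALL"})
	fmt.Println(string(want)) // {"from":"0x01","type":"CALL"}
}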
@@ -135,7 +93,12 @@ func TestCallTracerNative(t *testing.T) { testCallTracer("callTracer", "call_tracer", t) } +func TestCallTracerNativeWithLog(t *testing.T) { + testCallTracer("callTracer", "call_tracer_withLog", t) +} + func testCallTracer(tracerName string, dirPath string, t *testing.T) { + isLegacy := strings.HasSuffix(dirPath, "_legacy") files, err := os.ReadDir(filepath.Join("testdata", dirPath)) if err != nil { t.Fatalf("failed to retrieve tracer test suite: %v", err) @@ -158,7 +121,7 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) { } else if err := json.Unmarshal(blob, test); err != nil { t.Fatalf("failed to parse testcase: %v", err) } - if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil { + if err := tx.UnmarshalBinary(common.FromHex(test.Input)); err != nil { t.Fatalf("failed to parse testcase input: %v", err) } // Configure a blockchain with the given prestate @@ -174,9 +137,10 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) { Transfer: core.Transfer, Coinbase: test.Context.Miner, BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), - Time: new(big.Int).SetUint64(uint64(test.Context.Time)), + Time: uint64(test.Context.Time), Difficulty: (*big.Int)(test.Context.Difficulty), GasLimit: uint64(test.Context.GasLimit), + BaseFee: test.Genesis.BaseFee, } _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) ) @@ -190,56 +154,45 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) { t.Fatalf("failed to prepare transaction for tracing: %v", err) } st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) - if _, err = st.TransitionDb(); err != nil { + er, err := st.TransitionDb() + if err != nil { t.Fatalf("failed to execute transaction: %v", err) } - // Retrieve the trace result and compare against the etalon + // Retrieve the trace result and compare against the expected. res, err := tracer.GetResult() if err != nil { t.Fatalf("failed to retrieve trace result: %v", err) } - ret := new(callTrace) - if err := json.Unmarshal(res, ret); err != nil { - t.Fatalf("failed to unmarshal trace result: %v", err) + // The legacy javascript calltracer marshals json in js, which + // is not deterministic (as opposed to the golang json encoder). + if isLegacy { + // This is a tweak to make it deterministic. Can be removed when + // we remove the legacy tracer. + var x callTrace + json.Unmarshal(res, &x) + res, _ = json.Marshal(x) } - - if !jsonEqual(ret, test.Result) { - // uncomment this for easier debugging - //have, _ := json.MarshalIndent(ret, "", " ") - //want, _ := json.MarshalIndent(test.Result, "", " ") - //t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", string(have), string(want)) - t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result) + want, err := json.Marshal(test.Result) + if err != nil { + t.Fatalf("failed to marshal test: %v", err) + } + if string(want) != string(res) { + t.Fatalf("trace mismatch\n have: %v\n want: %v\n", string(res), string(want)) + } + // Sanity check: compare top call's gas used against vm result + type simpleResult struct { + GasUsed hexutil.Uint64 + } + var topCall simpleResult + if err := json.Unmarshal(res, &topCall); err != nil { + t.Fatalf("failed to unmarshal top calls gasUsed: %v", err) + } + if uint64(topCall.GasUsed) != er.UsedGas { + t.Fatalf("top call has invalid gasUsed. 
have: %d want: %d", topCall.GasUsed, er.UsedGas) } }) } } - -// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to -// comparison -func jsonEqual(x, y interface{}) bool { - xTrace := new(callTrace) - yTrace := new(callTrace) - if xj, err := json.Marshal(x); err == nil { - json.Unmarshal(xj, xTrace) - } else { - return false - } - if yj, err := json.Marshal(y); err == nil { - json.Unmarshal(yj, yTrace) - } else { - return false - } - return reflect.DeepEqual(xTrace, yTrace) -} - -// camel converts a snake cased input string into a camel cased output. -func camel(str string) string { - pieces := strings.Split(str, "_") - for i := 1; i < len(pieces); i++ { - pieces[i] = string(unicode.ToUpper(rune(pieces[i][0]))) + pieces[i][1:] - } - return strings.Join(pieces, "") -} func BenchmarkTracers(b *testing.B) { files, err := os.ReadDir(filepath.Join("testdata", "call_tracer")) if err != nil { @@ -285,7 +238,7 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) { Transfer: core.Transfer, Coinbase: test.Context.Miner, BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), - Time: new(big.Int).SetUint64(uint64(test.Context.Time)), + Time: uint64(test.Context.Time), Difficulty: (*big.Int)(test.Context.Difficulty), GasLimit: uint64(test.Context.GasLimit), } @@ -339,7 +292,7 @@ func TestZeroValueToNotExitCall(t *testing.T) { Transfer: core.Transfer, Coinbase: common.Address{}, BlockNumber: new(big.Int).SetUint64(8000000), - Time: new(big.Int).SetUint64(5), + Time: 5, Difficulty: big.NewInt(0x30000), GasLimit: uint64(6000000), } @@ -378,14 +331,8 @@ func TestZeroValueToNotExitCall(t *testing.T) { if err != nil { t.Fatalf("failed to retrieve trace result: %v", err) } - have := new(callTrace) - if err := json.Unmarshal(res, have); err != nil { - t.Fatalf("failed to unmarshal trace result: %v", err) - } - wantStr := `{"type":"CALL","from":"0x682a80a6f560eec50d54e63cbeda1c324c5f8d1b","to":"0x00000000000000000000000000000000deadbeef","value":"0x0","gas":"0x7148","gasUsed":"0x2d0","input":"0x","output":"0x","calls":[{"type":"CALL","from":"0x00000000000000000000000000000000deadbeef","to":"0x00000000000000000000000000000000000000ff","value":"0x0","gas":"0x6cbf","gasUsed":"0x0","input":"0x","output":"0x"}]}` - want := new(callTrace) - json.Unmarshal([]byte(wantStr), want) - if !jsonEqual(have, want) { - t.Error("have != want") + wantStr := `{"from":"0x682a80a6f560eec50d54e63cbeda1c324c5f8d1b","gas":"0x7148","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0x6cbf","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}` + if string(res) != wantStr { + t.Fatalf("trace mismatch\n have: %v\n want: %v\n", string(res), wantStr) } } diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json index 6c4a5e1778..2e29e8c1a1 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json @@ -71,7 +71,8 @@ "input": "0x7d65837a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a529806c67cc6486d4d62024471772f47f6fd672", "output": "0x0000000000000000000000000000000000000000000000000000000000000001", "to": "0x42b02b5deeb78f34cd5ac896473b63e6c99a71a2", - "type": 
"DELEGATECALL" + "type": "DELEGATECALL", + "value": "0x0" } ], "from": "0x269296dddce321a6bcbaa2f0181127593d732cba", diff --git a/eth/tracers/internal/tracetest/util.go b/eth/tracers/internal/tracetest/util.go new file mode 100644 index 0000000000..7a00913609 --- /dev/null +++ b/eth/tracers/internal/tracetest/util.go @@ -0,0 +1,72 @@ +package tracetest + +import ( + "strings" + "unicode" + + // Force-load native and js packages, to trigger registration + _ "github.com/PlatONnetwork/PlatON-Go/eth/tracers/js" + _ "github.com/PlatONnetwork/PlatON-Go/eth/tracers/native" +) + +// To generate a new callTracer test, copy paste the makeTest method below into +// a Geth console and call it with a transaction hash you which to export. + +/* +// makeTest generates a callTracer test by running a prestate reassembled and a +// call trace run, assembling all the gathered information into a test case. +var makeTest = function(tx, rewind) { + // Generate the genesis block from the block, transaction and prestate data + var block = eth.getBlock(eth.getTransaction(tx).blockHash); + var genesis = eth.getBlock(block.parentHash); + + delete genesis.gasUsed; + delete genesis.logsBloom; + delete genesis.parentHash; + delete genesis.receiptsRoot; + delete genesis.sha3Uncles; + delete genesis.size; + delete genesis.transactions; + delete genesis.transactionsRoot; + delete genesis.uncles; + + genesis.gasLimit = genesis.gasLimit.toString(); + genesis.number = genesis.number.toString(); + genesis.timestamp = genesis.timestamp.toString(); + + genesis.alloc = debug.traceTransaction(tx, {tracer: "prestateTracer", rewind: rewind}); + for (var key in genesis.alloc) { + var nonce = genesis.alloc[key].nonce; + if (nonce) { + genesis.alloc[key].nonce = nonce.toString(); + } + } + genesis.config = admin.nodeInfo.protocols.eth.config; + + // Generate the call trace and produce the test input + var result = debug.traceTransaction(tx, {tracer: "callTracer", rewind: rewind}); + delete result.time; + + console.log(JSON.stringify({ + genesis: genesis, + context: { + number: block.number.toString(), + difficulty: block.difficulty, + timestamp: block.timestamp.toString(), + gasLimit: block.gasLimit.toString(), + miner: block.miner, + }, + input: eth.getRawTransaction(tx), + result: result, + }, null, 2)); +} +*/ + +// camel converts a snake cased input string into a camel cased output. 
+func camel(str string) string { + pieces := strings.Split(str, "_") + for i := 1; i < len(pieces); i++ { + pieces[i] = string(unicode.ToUpper(rune(pieces[i][0]))) + pieces[i][1:] + } + return strings.Join(pieces, "") +} diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go index 8776a5f932..b2442b7c6e 100644 --- a/eth/tracers/js/tracer_test.go +++ b/eth/tracers/js/tracer_test.go @@ -78,7 +78,7 @@ func runTrace(tracer tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainCon ret, err := env.Interpreter().Run(contract, []byte{}, false) tracer.CaptureEnd(ret, startGas-contract.Gas, 1, err) // Rest gas assumes no refund - tracer.CaptureTxEnd(startGas - contract.Gas) + tracer.CaptureTxEnd(contract.Gas) if err != nil { return nil, err } @@ -107,15 +107,15 @@ func TestTracer(t *testing.T) { { // tests that we don't panic on bad arguments to memory access code: "{depths: [], step: function(log) { this.depths.push(log.memory.slice(-1,-2)); }, fault: function() {}, result: function() { return this.depths; }}", want: ``, - fail: "tracer accessed out of bound memory: offset -1, end -2 at step (:1:53(15)) in server-side tracer function 'step'", + fail: "tracer accessed out of bound memory: offset -1, end -2 at step (:1:53(13)) in server-side tracer function 'step'", }, { // tests that we don't panic on bad arguments to stack peeks code: "{depths: [], step: function(log) { this.depths.push(log.stack.peek(-1)); }, fault: function() {}, result: function() { return this.depths; }}", want: ``, - fail: "tracer accessed out of bound stack: size 0, index -1 at step (:1:53(13)) in server-side tracer function 'step'", + fail: "tracer accessed out of bound stack: size 0, index -1 at step (:1:53(11)) in server-side tracer function 'step'", }, { // tests that we don't panic on bad arguments to memory getUint code: "{ depths: [], step: function(log, db) { this.depths.push(log.memory.getUint(-64));}, fault: function() {}, result: function() { return this.depths; }}", want: ``, - fail: "tracer accessed out of bound memory: available 0, offset -64, size 32 at step (:1:58(13)) in server-side tracer function 'step'", + fail: "tracer accessed out of bound memory: available 0, offset -64, size 32 at step (:1:58(11)) in server-side tracer function 'step'", }, { // tests some general counting code: "{count: 0, step: function() { this.count += 1; }, fault: function() {}, result: function() { return this.count; }}", want: `3`, @@ -128,9 +128,9 @@ func TestTracer(t *testing.T) { }, { // tests to-string of opcodes code: "{opcodes: [], step: function(log) { this.opcodes.push(log.op.toString()); }, fault: function() {}, result: function() { return this.opcodes; }}", want: `["PUSH1","PUSH1","STOP"]`, - }, { // tests intrinsic gas - code: "{depths: [], step: function() {}, fault: function() {}, result: function(ctx) { return ctx.gasPrice+'.'+ctx.gasUsed+'.'+ctx.intrinsicGas; }}", - want: `"100000.6.21000"`, + }, { // tests gasUsed + code: "{depths: [], step: function() {}, fault: function() {}, result: function(ctx) { return ctx.gasPrice+'.'+ctx.gasUsed; }}", + want: `"100000.21006"`, }, { code: "{res: null, step: function(log) {}, fault: function() {}, result: function() { return toWord('0xffaa') }}", want: `{"0":0,"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":255,"31":170}`, @@ -150,7 +150,7 @@ func TestTracer(t *testing.T) { }, { code: "{res: [], step: 
function(log) { if (log.op.toString() === 'STOP') { this.res.push(log.memory.slice(5, 1025 * 1024)) } }, fault: function() {}, result: function() { return this.res }}", want: "", - fail: "tracer reached limit for padding memory slice: end 1049600, memorySize 32 at step (:1:83(23)) in server-side tracer function 'step'", + fail: "tracer reached limit for padding memory slice: end 1049600, memorySize 32 at step (:1:83(20)) in server-side tracer function 'step'", contract: []byte{byte(vm.PUSH1), byte(0xff), byte(vm.PUSH1), byte(0x00), byte(vm.MSTORE8), byte(vm.STOP)}, }, } { @@ -228,6 +228,33 @@ func TestNoStepExec(t *testing.T) { } } +func TestIsPrecompile(t *testing.T) { + txCtx := vm.TxContext{GasPrice: big.NewInt(100000)} + tracer, err := newJsTracer("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", nil, nil) + if err != nil { + t.Fatal(err) + } + + blockCtx := vm.BlockContext{BlockNumber: big.NewInt(150)} + res, err := runTrace(tracer, &vmContext{blockCtx, txCtx}, params.TestChainConfig, nil) + if err != nil { + t.Error(err) + } + if string(res) != "false" { + t.Errorf("tracer should not consider blake2f as precompile in byzantium") + } + + tracer, _ = newJsTracer("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", nil, nil) + blockCtx = vm.BlockContext{BlockNumber: big.NewInt(250)} + res, err = runTrace(tracer, &vmContext{blockCtx, txCtx}, params.TestChainConfig, nil) + if err != nil { + t.Error(err) + } + if string(res) != "true" { + t.Errorf("tracer should consider blake2f as precompile in istanbul") + } +} + func TestEnterExit(t *testing.T) { // test that either both or none of enter() and exit() are defined if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}}", new(tracers.Context), nil); err == nil { diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index 173611f202..1ca15995fd 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -23,8 +23,10 @@ import ( "sync/atomic" "time" - "github.com/PlatONnetwork/PlatON-Go/common" + "github.com/PlatONnetwork/PlatON-Go/accounts/abi" "github.com/PlatONnetwork/PlatON-Go/common/hexutil" + + "github.com/PlatONnetwork/PlatON-Go/common" "github.com/PlatONnetwork/PlatON-Go/core/vm" "github.com/PlatONnetwork/PlatON-Go/eth/tracers" ) @@ -35,16 +37,24 @@ func init() { tracers.DefaultDirectory.Register("callTracer", newCallTracer, false) } +type callLog struct { + Address common.Address `json:"address"` + Topics []common.Hash `json:"topics"` + Data hexutil.Bytes `json:"data"` +} + type callFrame struct { - Type vm.OpCode `json:"-"` - From common.Address `json:"from"` - Gas uint64 `json:"gas"` - GasUsed uint64 `json:"gasUsed"` - To common.Address `json:"to,omitempty" rlp:"optional"` - Input []byte `json:"input" rlp:"optional"` - Output []byte `json:"output,omitempty" rlp:"optional"` - Error string `json:"error,omitempty" rlp:"optional"` - Calls []callFrame `json:"calls,omitempty" rlp:"optional"` + Type vm.OpCode `json:"-"` + From common.Address `json:"from"` + Gas uint64 `json:"gas"` + GasUsed uint64 `json:"gasUsed"` + To common.Address `json:"to,omitempty" rlp:"optional"` + Input []byte `json:"input" rlp:"optional"` + Output []byte 
`json:"output,omitempty" rlp:"optional"` + Error string `json:"error,omitempty" rlp:"optional"` + RevertReason string `json:"revertReason,omitempty"` + Calls []callFrame `json:"calls,omitempty" rlp:"optional"` + Logs []callLog `json:"logs,omitempty" rlp:"optional"` // Placed at end on purpose. The RLP will be decoded to 0 instead of // nil if there are non-empty elements after in the struct. Value *big.Int `json:"value,omitempty" rlp:"optional"` @@ -54,6 +64,32 @@ func (f callFrame) TypeString() string { return f.Type.String() } +func (f callFrame) failed() bool { + return len(f.Error) > 0 +} + +func (f *callFrame) processOutput(output []byte, err error) { + output = common.CopyBytes(output) + if err == nil { + f.Output = output + return + } + f.Error = err.Error() + if f.Type == vm.CREATE || f.Type == vm.CREATE2 { + f.To = common.Address{} + } + if !errors.Is(err, vm.ErrExecutionReverted) || len(output) == 0 { + return + } + f.Output = output + if len(output) < 4 { + return + } + if unpacked, err := abi.UnpackRevert(output); err == nil { + f.RevertReason = unpacked + } +} + type callFrameMarshaling struct { TypeString string `json:"type"` Gas hexutil.Uint64 @@ -64,15 +100,17 @@ type callFrameMarshaling struct { } type callTracer struct { - env *vm.EVM + noopTracer callstack []callFrame config callTracerConfig + gasLimit uint64 interrupt uint32 // Atomic flag to signal execution interruption reason error // Textual reason for the interruption } type callTracerConfig struct { OnlyTopCall bool `json:"onlyTopCall"` // If true, call tracer won't collect any subcalls + WithLog bool `json:"withLog"` // If true, call tracer will collect event logs } // newCallTracer returns a native go tracer which tracks @@ -91,7 +129,6 @@ func newCallTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, e // CaptureStart implements the EVMLogger interface to initialize the tracing operation. func (t *callTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { - t.env = env t.callstack[0] = callFrame{ Type: vm.CALL, From: from, @@ -106,25 +143,44 @@ func (t *callTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Ad } // CaptureEnd is called after the call finishes to finalize the tracing. -func (t *callTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) { - t.callstack[0].GasUsed = gasUsed - output = common.CopyBytes(output) - if err != nil { - t.callstack[0].Error = err.Error() - if err.Error() == "execution reverted" && len(output) > 0 { - t.callstack[0].Output = output - } - } else { - t.callstack[0].Output = output - } +func (t *callTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { + t.callstack[0].processOutput(output, err) } // CaptureState implements the EVMLogger interface to trace a single step of VM execution. 
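The new revertReason field is filled in by abi.UnpackRevert, which decodes the canonical Error(string) revert payload (selector 0x08c379a0 followed by an ABI-encoded string). A self-contained sketch of that decoding, written by hand here purely for illustration and not the library's own implementation:

package main

import (
	"encoding/binary"
	"encoding/hex"
	"errors"
	"fmt"
)

// unpackRevertString decodes the canonical Error(string) revert payload:
// the 4-byte selector 0x08c379a0 followed by an ABI-encoded string
// (32-byte offset, 32-byte length, then the padded bytes).
func unpackRevertString(data []byte) (string, error) {
	if len(data) < 4 || hex.EncodeToString(data[:4]) != "08c379a0" {
		return "", errors.New("not an Error(string) revert")
	}
	payload := data[4:]
	if len(payload) < 64 {
		return "", errors.New("payload too short")
	}
	offset := binary.BigEndian.Uint64(payload[24:32]) // low 8 bytes of the 32-byte offset word
	if uint64(len(payload)) < offset+32 {
		return "", errors.New("bad offset")
	}
	length := binary.BigEndian.Uint64(payload[offset+24 : offset+32])
	if uint64(len(payload)) < offset+32+length {
		return "", errors.New("bad length")
	}
	return string(payload[offset+32 : offset+32+length]), nil
}

func main() {
	// Revert data for revert("oops"), assembled by hand for the example.
	data, _ := hex.DecodeString(
		"08c379a0" +
			"0000000000000000000000000000000000000000000000000000000000000020" + // offset 32
			"0000000000000000000000000000000000000000000000000000000000000004" + // length 4
			"6f6f707300000000000000000000000000000000000000000000000000000000") // "oops" padded
	reason, err := unpackRevertString(data)
	fmt.Println(reason, err) // oops <nil>
}

Outputs shorter than four bytes are skipped for the same reason processOutput above returns early on len(output) < 4: there is no selector to inspect.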
func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { -} + // Only logs need to be captured via opcode processing + if !t.config.WithLog { + return + } + // Avoid processing nested calls when only caring about top call + if t.config.OnlyTopCall && depth > 0 { + return + } + // Skip if tracing was interrupted + if atomic.LoadUint32(&t.interrupt) > 0 { + return + } + switch op { + case vm.LOG0, vm.LOG1, vm.LOG2, vm.LOG3, vm.LOG4: + size := int(op - vm.LOG0) + + stack := scope.Stack + stackData := stack.Data() + + // Don't modify the stack + mStart := stackData[len(stackData)-1] + mSize := stackData[len(stackData)-2] + topics := make([]common.Hash, size) + for i := 0; i < size; i++ { + topic := stackData[len(stackData)-2-(i+1)] + topics[i] = common.Hash(topic.Bytes32()) + } -// CaptureFault implements the EVMLogger interface to trace an execution fault. -func (t *callTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) { + data := scope.Memory.GetCopy(int64(mStart.Uint64()), int64(mSize.Uint64())) + log := callLog{Address: scope.Contract.Address(), Topics: topics, Data: hexutil.Bytes(data)} + t.callstack[len(t.callstack)-1].Logs = append(t.callstack[len(t.callstack)-1].Logs, log) + } } // CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). @@ -134,7 +190,6 @@ func (t *callTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common. } // Skip if tracing was interrupted if atomic.LoadUint32(&t.interrupt) > 0 { - t.env.Cancel() return } @@ -165,20 +220,21 @@ func (t *callTracer) CaptureExit(output []byte, gasUsed uint64, err error) { size -= 1 call.GasUsed = gasUsed - if err == nil { - call.Output = common.CopyBytes(output) - } else { - call.Error = err.Error() - if call.Type == vm.CREATE || call.Type == vm.CREATE2 { - call.To = common.Address{} - } - } + call.processOutput(output, err) t.callstack[size-1].Calls = append(t.callstack[size-1].Calls, call) } -func (*callTracer) CaptureTxStart(gasLimit uint64) {} +func (t *callTracer) CaptureTxStart(gasLimit uint64) { + t.gasLimit = gasLimit +} -func (*callTracer) CaptureTxEnd(restGas uint64) {} +func (t *callTracer) CaptureTxEnd(restGas uint64) { + t.callstack[0].GasUsed = t.gasLimit - restGas + if t.config.WithLog { + // Logs are not emitted when the call fails + clearFailedLogs(&t.callstack[0], false) + } +} // GetResult returns the json-encoded nested list of call traces, and any // error arising from the encoding or forceful termination (via `Stop`). @@ -198,3 +254,16 @@ func (t *callTracer) Stop(err error) { t.reason = err atomic.StoreUint32(&t.interrupt, 1) } + +// clearFailedLogs clears the logs of a callframe and all its children +// in case of execution failure. +func clearFailedLogs(cf *callFrame, parentFailed bool) { + failed := cf.failed() || parentFailed + // Clear own logs + if failed { + cf.Logs = nil + } + for i := range cf.Calls { + clearFailedLogs(&cf.Calls[i], failed) + } +} diff --git a/eth/tracers/native/gen_callframe_json.go b/eth/tracers/native/gen_callframe_json.go index 02c4065612..fd865f0f03 100644 --- a/eth/tracers/native/gen_callframe_json.go +++ b/eth/tracers/native/gen_callframe_json.go @@ -16,17 +16,19 @@ var _ = (*callFrameMarshaling)(nil) // MarshalJSON marshals as JSON. 
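Log collection is opt-in via the withLog flag in the tracer config, carried in the same json.RawMessage that newCallTracer already parses; logs gathered inside frames that ultimately fail are discarded again by clearFailedLogs at CaptureTxEnd, matching the EVM rule that reverted calls emit no logs. A minimal sketch of the config plumbing (the struct is a local copy for illustration, and the RPC shape in the comment is only indicative):

package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of the tracer's config shape, for illustration only.
type callTracerConfig struct {
	OnlyTopCall bool `json:"onlyTopCall"`
	WithLog     bool `json:"withLog"`
}

func main() {
	// The kind of blob an RPC caller would pass as tracerConfig, e.g.
	// {tracer: "callTracer", tracerConfig: {withLog: true}}.
	raw := json.RawMessage(`{"withLog": true}`)

	var cfg callTracerConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("onlyTopCall=%v withLog=%v\n", cfg.OnlyTopCall, cfg.WithLog) // onlyTopCall=false withLog=true
}

Note also that CaptureTxEnd now derives the top frame's gasUsed as gasLimit - restGas, so it includes intrinsic gas; that is the same accounting behind the updated js tracer expectation of 21006 earlier in this diff (21000 intrinsic plus 6 gas of execution).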
func (c callFrame) MarshalJSON() ([]byte, error) { type callFrame0 struct { - Type vm.OpCode `json:"-"` - From common.Address `json:"from"` - Gas hexutil.Uint64 `json:"gas"` - GasUsed hexutil.Uint64 `json:"gasUsed"` - To common.Address `json:"to,omitempty" rlp:"optional"` - Input hexutil.Bytes `json:"input" rlp:"optional"` - Output hexutil.Bytes `json:"output,omitempty" rlp:"optional"` - Error string `json:"error,omitempty" rlp:"optional"` - Calls []callFrame `json:"calls,omitempty" rlp:"optional"` - Value *hexutil.Big `json:"value,omitempty" rlp:"optional"` - TypeString string `json:"type"` + Type vm.OpCode `json:"-"` + From common.Address `json:"from"` + Gas hexutil.Uint64 `json:"gas"` + GasUsed hexutil.Uint64 `json:"gasUsed"` + To common.Address `json:"to,omitempty" rlp:"optional"` + Input hexutil.Bytes `json:"input" rlp:"optional"` + Output hexutil.Bytes `json:"output,omitempty" rlp:"optional"` + Error string `json:"error,omitempty" rlp:"optional"` + RevertReason string `json:"revertReason,omitempty"` + Calls []callFrame `json:"calls,omitempty" rlp:"optional"` + Logs []callLog `json:"logs,omitempty" rlp:"optional"` + Value *hexutil.Big `json:"value,omitempty" rlp:"optional"` + TypeString string `json:"type"` } var enc callFrame0 enc.Type = c.Type @@ -37,7 +39,9 @@ func (c callFrame) MarshalJSON() ([]byte, error) { enc.Input = c.Input enc.Output = c.Output enc.Error = c.Error + enc.RevertReason = c.RevertReason enc.Calls = c.Calls + enc.Logs = c.Logs enc.Value = (*hexutil.Big)(c.Value) enc.TypeString = c.TypeString() return json.Marshal(&enc) @@ -46,16 +50,18 @@ func (c callFrame) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals from JSON. func (c *callFrame) UnmarshalJSON(input []byte) error { type callFrame0 struct { - Type *vm.OpCode `json:"-"` - From *common.Address `json:"from"` - Gas *hexutil.Uint64 `json:"gas"` - GasUsed *hexutil.Uint64 `json:"gasUsed"` - To *common.Address `json:"to,omitempty" rlp:"optional"` - Input *hexutil.Bytes `json:"input" rlp:"optional"` - Output *hexutil.Bytes `json:"output,omitempty" rlp:"optional"` - Error *string `json:"error,omitempty" rlp:"optional"` - Calls []callFrame `json:"calls,omitempty" rlp:"optional"` - Value *hexutil.Big `json:"value,omitempty" rlp:"optional"` + Type *vm.OpCode `json:"-"` + From *common.Address `json:"from"` + Gas *hexutil.Uint64 `json:"gas"` + GasUsed *hexutil.Uint64 `json:"gasUsed"` + To *common.Address `json:"to,omitempty" rlp:"optional"` + Input *hexutil.Bytes `json:"input" rlp:"optional"` + Output *hexutil.Bytes `json:"output,omitempty" rlp:"optional"` + Error *string `json:"error,omitempty" rlp:"optional"` + RevertReason *string `json:"revertReason,omitempty"` + Calls []callFrame `json:"calls,omitempty" rlp:"optional"` + Logs []callLog `json:"logs,omitempty" rlp:"optional"` + Value *hexutil.Big `json:"value,omitempty" rlp:"optional"` } var dec callFrame0 if err := json.Unmarshal(input, &dec); err != nil { @@ -85,9 +91,15 @@ func (c *callFrame) UnmarshalJSON(input []byte) error { if dec.Error != nil { c.Error = *dec.Error } + if dec.RevertReason != nil { + c.RevertReason = *dec.RevertReason + } if dec.Calls != nil { c.Calls = dec.Calls } + if dec.Logs != nil { + c.Logs = dec.Logs + } if dec.Value != nil { c.Value = (*big.Int)(dec.Value) } diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index a7830c64de..85c9f48ff1 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -56,7 +56,7 @@ func BenchmarkTransactionTrace(b *testing.B) { Transfer: core.Transfer, Coinbase: 
common.Address{}, BlockNumber: new(big.Int).SetUint64(uint64(5)), - Time: new(big.Int).SetUint64(uint64(5)), + Time: 5, Difficulty: big.NewInt(0xffffffff), GasLimit: gas, BaseFee: big.NewInt(8), diff --git a/ethclient/gethclient/gethclient.go b/ethclient/gethclient/gethclient.go index 686597c613..366823bf66 100644 --- a/ethclient/gethclient/gethclient.go +++ b/ethclient/gethclient/gethclient.go @@ -96,6 +96,11 @@ func (ec *Client) GetProof(ctx context.Context, account common.Address, keys []s StorageProof []storageResult `json:"storageProof"` } + // Avoid keys being 'null'. + if keys == nil { + keys = []string{} + } + var res accountResult err := ec.c.CallContext(ctx, &res, "eth_getProof", account, keys, toBlockNumArg(blockNumber)) // Turn hexutils back to normal datatypes diff --git a/ethclient/gethclient/gethclient_test.go b/ethclient/gethclient/gethclient_test.go index b0587ee8bd..8447ca903a 100644 --- a/ethclient/gethclient/gethclient_test.go +++ b/ethclient/gethclient/gethclient_test.go @@ -299,7 +299,7 @@ func testSubscribePendingTransactions(t *testing.T, client *rpc.Client) { if err != nil { t.Fatal(err) } - // Check that the transaction was send over the channel + // Check that the transaction was sent over the channel hash := <-ch if hash != signedTx.Hash() { t.Fatalf("Invalid tx hash received, got %v, want %v", hash, signedTx.Hash()) diff --git a/ethdb/dbtest/testsuite.go b/ethdb/dbtest/testsuite.go index 65b87126d9..2d3a1ded27 100644 --- a/ethdb/dbtest/testsuite.go +++ b/ethdb/dbtest/testsuite.go @@ -18,6 +18,7 @@ package dbtest import ( "bytes" + "math/rand" "reflect" "sort" "testing" @@ -377,6 +378,101 @@ func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) { }) } +// BenchDatabaseSuite runs a suite of benchmarks against a KeyValueStore database +// implementation. 
+func BenchDatabaseSuite(b *testing.B, New func() ethdb.KeyValueStore) { + var ( + keys, vals = makeDataset(1_000_000, 32, 32, false) + sKeys, sVals = makeDataset(1_000_000, 32, 32, true) + ) + // Run benchmarks sequentially + b.Run("Write", func(b *testing.B) { + benchWrite := func(b *testing.B, keys, vals [][]byte) { + b.ResetTimer() + b.ReportAllocs() + + db := New() + defer db.Close() + + for i := 0; i < len(keys); i++ { + db.Put(keys[i], vals[i]) + } + } + b.Run("WriteSorted", func(b *testing.B) { + benchWrite(b, sKeys, sVals) + }) + b.Run("WriteRandom", func(b *testing.B) { + benchWrite(b, keys, vals) + }) + }) + b.Run("Read", func(b *testing.B) { + benchRead := func(b *testing.B, keys, vals [][]byte) { + db := New() + defer db.Close() + + for i := 0; i < len(keys); i++ { + db.Put(keys[i], vals[i]) + } + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < len(keys); i++ { + db.Get(keys[i]) + } + } + b.Run("ReadSorted", func(b *testing.B) { + benchRead(b, sKeys, sVals) + }) + b.Run("ReadRandom", func(b *testing.B) { + benchRead(b, keys, vals) + }) + }) + b.Run("Iteration", func(b *testing.B) { + benchIteration := func(b *testing.B, keys, vals [][]byte) { + db := New() + defer db.Close() + + for i := 0; i < len(keys); i++ { + db.Put(keys[i], vals[i]) + } + b.ResetTimer() + b.ReportAllocs() + + it := db.NewIterator(nil, nil) + for it.Next() { + } + it.Release() + } + b.Run("IterationSorted", func(b *testing.B) { + benchIteration(b, sKeys, sVals) + }) + b.Run("IterationRandom", func(b *testing.B) { + benchIteration(b, keys, vals) + }) + }) + b.Run("BatchWrite", func(b *testing.B) { + benchBatchWrite := func(b *testing.B, keys, vals [][]byte) { + b.ResetTimer() + b.ReportAllocs() + + db := New() + defer db.Close() + + batch := db.NewBatch() + for i := 0; i < len(keys); i++ { + batch.Put(keys[i], vals[i]) + } + batch.Write() + } + b.Run("BenchWriteSorted", func(b *testing.B) { + benchBatchWrite(b, sKeys, sVals) + }) + b.Run("BenchWriteRandom", func(b *testing.B) { + benchBatchWrite(b, keys, vals) + }) + }) +} + func iterateKeys(it ethdb.Iterator) []string { keys := []string{} for it.Next() { @@ -386,3 +482,25 @@ func iterateKeys(it ethdb.Iterator) []string { it.Release() return keys } + +// randomHash generates a random blob of data and returns it as a hash. 
+func randBytes(len int) []byte { + buf := make([]byte, len) + if n, err := rand.Read(buf); n != len || err != nil { + panic(err) + } + return buf +} + +func makeDataset(size, ksize, vsize int, order bool) ([][]byte, [][]byte) { + var keys [][]byte + var vals [][]byte + for i := 0; i < size; i += 1 { + keys = append(keys, randBytes(ksize)) + vals = append(vals, randBytes(vsize)) + } + if order { + sort.Slice(keys, func(i, j int) bool { return bytes.Compare(keys[i], keys[j]) < 0 }) + } + return keys, vals +} diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go index 99ed9b9493..5c2824e742 100644 --- a/ethdb/leveldb/leveldb.go +++ b/ethdb/leveldb/leveldb.go @@ -64,18 +64,19 @@ type Database struct { fn string // filename for reporting db *leveldb.DB // LevelDB instance - compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction - compReadMeter metrics.Meter // Meter for measuring the data read during compaction - compWriteMeter metrics.Meter // Meter for measuring the data written during compaction - writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction - writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction - diskSizeGauge metrics.Gauge // Gauge for tracking the size of all the levels in the database - diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read - diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written - memCompGauge metrics.Gauge // Gauge for tracking the number of memory compaction - level0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in level0 - nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in non0 level - seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt + compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction + compReadMeter metrics.Meter // Meter for measuring the data read during compaction + compWriteMeter metrics.Meter // Meter for measuring the data written during compaction + writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction + writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction + diskSizeGauge metrics.Gauge // Gauge for tracking the size of all the levels in the database + diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read + diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written + memCompGauge metrics.Gauge // Gauge for tracking the number of memory compaction + level0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in level0 + nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in non0 level + seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt + manualMemAllocGauge metrics.Gauge // Gauge to track the amount of memory that has been manually allocated (not a part of runtime/GC) quitLock sync.Mutex // Mutex protecting the quit channel access quitChan chan chan error // Quit channel to stop the metrics collection before closing the database @@ -144,6 +145,7 @@ func NewCustom(file string, namespace string, customize func(options *opt.Option ldb.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil) 
ldb.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil) ldb.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil) + ldb.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil) // Start up the metrics gathering and return go ldb.meter(metricsGatheringInterval) diff --git a/ethdb/leveldb/leveldb_test.go b/ethdb/leveldb/leveldb_test.go index 0218f6156e..0f14745019 100644 --- a/ethdb/leveldb/leveldb_test.go +++ b/ethdb/leveldb/leveldb_test.go @@ -38,3 +38,15 @@ func TestLevelDB(t *testing.T) { }) }) } + +func BenchmarkLevelDB(b *testing.B) { + dbtest.BenchDatabaseSuite(b, func() ethdb.KeyValueStore { + db, err := leveldb.Open(storage.NewMemStorage(), nil) + if err != nil { + b.Fatal(err) + } + return &Database{ + db: db, + } + }) +} diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go new file mode 100644 index 0000000000..9e6e2f5750 --- /dev/null +++ b/ethdb/pebble/pebble.go @@ -0,0 +1,686 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package pebble implements the key-value database layer based on pebble. +package pebble + +import ( + "bytes" + "fmt" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/PlatONnetwork/PlatON-Go/common" + "github.com/PlatONnetwork/PlatON-Go/ethdb" + "github.com/PlatONnetwork/PlatON-Go/log" + "github.com/PlatONnetwork/PlatON-Go/metrics" + "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/bloom" +) + +const ( + // minCache is the minimum amount of memory in megabytes to allocate to pebble + // read and write caching, split half and half. + minCache = 16 + + // minHandles is the minimum number of files handles to allocate to the open + // database files. + minHandles = 16 + + // metricsGatheringInterval specifies the interval to retrieve pebble database + // compaction, io and pause stats to report to the user. + metricsGatheringInterval = 3 * time.Second + + // degradationWarnInterval specifies how often warning should be printed if the + // leveldb database cannot keep up with requested writes. + degradationWarnInterval = time.Minute +) + +// Database is a persistent key-value store based on the pebble storage engine. +// Apart from basic data storage functionality it also supports batch writes and +// iterating over the keyspace in binary-alphabetical order. 
+type Database struct { + fn string // filename for reporting + db *pebble.DB // Underlying pebble storage engine + + compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction + compReadMeter metrics.Meter // Meter for measuring the data read during compaction + compWriteMeter metrics.Meter // Meter for measuring the data written during compaction + writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction + writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction + diskSizeGauge metrics.Gauge // Gauge for tracking the size of all the levels in the database + diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read + diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written + memCompGauge metrics.Gauge // Gauge for tracking the number of memory compaction + level0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in level0 + nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in non0 level + seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt + manualMemAllocGauge metrics.Gauge // Gauge for tracking amount of non-managed memory currently allocated + + levelsGauge []metrics.Gauge // Gauge for tracking the number of tables in levels + + quitLock sync.RWMutex // Mutex protecting the quit channel and the closed flag + quitChan chan chan error // Quit channel to stop the metrics collection before closing the database + closed bool // keep track of whether we're Closed + + log log.Logger // Contextual logger tracking the database path + + activeComp int // Current number of active compactions + compStartTime time.Time // The start time of the earliest currently-active compaction + compTime atomic.Int64 // Total time spent in compaction in ns + level0Comp atomic.Uint32 // Total number of level-zero compactions + nonLevel0Comp atomic.Uint32 // Total number of non level-zero compactions + + writeStalled atomic.Bool // Flag whether the write is stalled + writeDelayStartTime time.Time // The start time of the latest write stall + writeDelayCount atomic.Int64 // Total number of write stall counts + writeDelayTime atomic.Int64 // Total time spent in write stalls + + writeOptions *pebble.WriteOptions +} + +func (d *Database) onCompactionBegin(info pebble.CompactionInfo) { + if d.activeComp == 0 { + d.compStartTime = time.Now() + } + l0 := info.Input[0] + if l0.Level == 0 { + d.level0Comp.Add(1) + } else { + d.nonLevel0Comp.Add(1) + } + d.activeComp++ +} + +func (d *Database) onCompactionEnd(info pebble.CompactionInfo) { + if d.activeComp == 1 { + d.compTime.Add(int64(time.Since(d.compStartTime))) + } else if d.activeComp == 0 { + panic("should not happen") + } + d.activeComp-- +} + +func (d *Database) onWriteStallBegin(b pebble.WriteStallBeginInfo) { + d.writeDelayStartTime = time.Now() + d.writeDelayCount.Add(1) + d.writeStalled.Store(true) +} + +func (d *Database) onWriteStallEnd() { + d.writeDelayTime.Add(int64(time.Since(d.writeDelayStartTime))) + d.writeStalled.Store(false) +} + +// panicLogger is just a noop logger to disable Pebble's internal logger. +// +// TODO(karalabe): Remove when Pebble sets this as the default. 
+type panicLogger struct{} + +func (l panicLogger) Infof(format string, args ...interface{}) { +} + +func (l panicLogger) Errorf(format string, args ...interface{}) { +} + +func (l panicLogger) Fatalf(format string, args ...interface{}) { + panic(fmt.Errorf("fatal: "+format, args...)) +} + +// New returns a wrapped pebble DB object. The namespace is the prefix that the +// metrics reporting should use for surfacing internal stats. +func New(file string, cache int, handles int, namespace string, readonly bool, ephemeral bool) (*Database, error) { + // Ensure we have some minimal caching and file guarantees + if cache < minCache { + cache = minCache + } + if handles < minHandles { + handles = minHandles + } + logger := log.New("database", file) + logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles) + + // The max memtable size is limited by the uint32 offsets stored in + // internal/arenaskl.node, DeferredBatchOp, and flushableBatchEntry. + // + // - MaxUint32 on 64-bit platforms; + // - MaxInt on 32-bit platforms. + // + // It is used when slices are limited to Uint32 on 64-bit platforms (the + // length limit for slices is naturally MaxInt on 32-bit platforms). + // + // Taken from https://github.com/cockroachdb/pebble/blob/master/internal/constants/constants.go + maxMemTableSize := (1<<31)<<(^uint(0)>>63) - 1 + + // Two memory tables is configured which is identical to leveldb, + // including a frozen memory table and another live one. + memTableLimit := 2 + memTableSize := cache * 1024 * 1024 / 2 / memTableLimit + + // The memory table size is currently capped at maxMemTableSize-1 due to a + // known bug in the pebble where maxMemTableSize is not recognized as a + // valid size. + // + // TODO use the maxMemTableSize as the maximum table size once the issue + // in pebble is fixed. + if memTableSize >= maxMemTableSize { + memTableSize = maxMemTableSize - 1 + } + db := &Database{ + fn: file, + log: logger, + quitChan: make(chan chan error), + writeOptions: &pebble.WriteOptions{Sync: !ephemeral}, + } + opt := &pebble.Options{ + // Pebble has a single combined cache area and the write + // buffers are taken from this too. Assign all available + // memory allowance for cache. + Cache: pebble.NewCache(int64(cache * 1024 * 1024)), + MaxOpenFiles: handles, + + // The size of memory table(as well as the write buffer). + // Note, there may have more than two memory tables in the system. + MemTableSize: uint64(memTableSize), + + // MemTableStopWritesThreshold places a hard limit on the size + // of the existent MemTables(including the frozen one). + // Note, this must be the number of tables not the size of all memtables + // according to https://github.com/cockroachdb/pebble/blob/master/options.go#L738-L742 + // and to https://github.com/cockroachdb/pebble/blob/master/db.go#L1892-L1903. + MemTableStopWritesThreshold: memTableLimit, + + // The default compaction concurrency(1 thread), + // Here use all available CPUs for faster compaction. + MaxConcurrentCompactions: func() int { return runtime.NumCPU() }, + + // Per-level options. Options for at least one level must be specified. The + // options for the last level are used for all subsequent levels. 
+ Levels: []pebble.LevelOptions{ + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + }, + ReadOnly: readonly, + EventListener: &pebble.EventListener{ + CompactionBegin: db.onCompactionBegin, + CompactionEnd: db.onCompactionEnd, + WriteStallBegin: db.onWriteStallBegin, + WriteStallEnd: db.onWriteStallEnd, + }, + Logger: panicLogger{}, // TODO(karalabe): Delete when this is upstreamed in Pebble + } + // Disable seek compaction explicitly. Check https://github.com/ethereum/go-ethereum/pull/20130 + // for more details. + opt.Experimental.ReadSamplingMultiplier = -1 + + // Open the db and recover any potential corruptions + innerDB, err := pebble.Open(file, opt) + if err != nil { + return nil, err + } + db.db = innerDB + + db.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil) + db.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil) + db.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil) + db.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil) + db.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil) + db.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil) + db.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil) + db.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil) + db.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil) + db.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil) + db.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil) + db.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil) + db.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil) + + // Start up the metrics gathering and return + go db.meter(metricsGatheringInterval, namespace) + return db, nil +} + +// Close stops the metrics collection, flushes any pending data to disk and closes +// all io accesses to the underlying key-value store. +func (d *Database) Close() error { + d.quitLock.Lock() + defer d.quitLock.Unlock() + // Allow double closing, simplifies things + if d.closed { + return nil + } + d.closed = true + if d.quitChan != nil { + errc := make(chan error) + d.quitChan <- errc + if err := <-errc; err != nil { + d.log.Error("Metrics collection failed", "err", err) + } + d.quitChan = nil + } + return d.db.Close() +} + +// Has retrieves if a key is present in the key-value store. +func (d *Database) Has(key []byte) (bool, error) { + d.quitLock.RLock() + defer d.quitLock.RUnlock() + if d.closed { + return false, pebble.ErrClosed + } + _, closer, err := d.db.Get(key) + if err == pebble.ErrNotFound { + return false, nil + } else if err != nil { + return false, err + } + closer.Close() + return true, nil +} + +// Get retrieves the given key if it's present in the key-value store. 
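A quick worked reading of the platform-dependent maxMemTableSize expression in New above: ^uint(0)>>63 is 1 when uint is 64 bits wide and 0 when it is 32, so the constant evaluates to 2^32-1 (MaxUint32) or 2^31-1 respectively. A standalone sketch relying only on the language spec:

package main

import "fmt"

func main() {
	// ^uint(0) is all ones; shifting it right by 63 leaves 1 when uint is
	// 64 bits wide and 0 when it is 32 bits wide.
	const shift = ^uint(0) >> 63

	// 64-bit build: (1<<31)<<1 - 1 = 2^32 - 1 = math.MaxUint32.
	// 32-bit build: (1<<31)<<0 - 1 = 2^31 - 1.
	const maxMemTableSize = (1<<31)<<shift - 1

	fmt.Println(shift, maxMemTableSize) // 1 4294967295 on a 64-bit build
}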
+func (d *Database) Get(key []byte) ([]byte, error) { + d.quitLock.RLock() + defer d.quitLock.RUnlock() + if d.closed { + return nil, pebble.ErrClosed + } + dat, closer, err := d.db.Get(key) + if err != nil { + return nil, err + } + ret := make([]byte, len(dat)) + copy(ret, dat) + closer.Close() + return ret, nil +} + +// Put inserts the given value into the key-value store. +func (d *Database) Put(key []byte, value []byte) error { + d.quitLock.RLock() + defer d.quitLock.RUnlock() + if d.closed { + return pebble.ErrClosed + } + return d.db.Set(key, value, d.writeOptions) +} + +// Delete removes the key from the key-value store. +func (d *Database) Delete(key []byte) error { + d.quitLock.RLock() + defer d.quitLock.RUnlock() + if d.closed { + return pebble.ErrClosed + } + return d.db.Delete(key, nil) +} + +// NewBatch creates a write-only key-value store that buffers changes to its host +// database until a final write is called. +func (d *Database) NewBatch() ethdb.Batch { + return &batch{ + b: d.db.NewBatch(), + db: d, + } +} + +// NewBatchWithSize creates a write-only database batch with pre-allocated buffer. +func (d *Database) NewBatchWithSize(size int) ethdb.Batch { + return &batch{ + b: d.db.NewBatchWithSize(size), + db: d, + } +} + +// snapshot wraps a pebble snapshot for implementing the Snapshot interface. +type snapshot struct { + db *pebble.Snapshot +} + +// NewSnapshot creates a database snapshot based on the current state. +// The created snapshot will not be affected by all following mutations +// happened on the database. +// Note don't forget to release the snapshot once it's used up, otherwise +// the stale data will never be cleaned up by the underlying compactor. +func (d *Database) NewSnapshot() (ethdb.Snapshot, error) { + snap := d.db.NewSnapshot() + return &snapshot{db: snap}, nil +} + +// Has retrieves if a key is present in the snapshot backing by a key-value +// data store. +func (snap *snapshot) Has(key []byte) (bool, error) { + _, closer, err := snap.db.Get(key) + if err != nil { + if err != pebble.ErrNotFound { + return false, err + } else { + return false, nil + } + } + closer.Close() + return true, nil +} + +// Get retrieves the given key if it's present in the snapshot backing by +// key-value data store. +func (snap *snapshot) Get(key []byte) ([]byte, error) { + dat, closer, err := snap.db.Get(key) + if err != nil { + return nil, err + } + ret := make([]byte, len(dat)) + copy(ret, dat) + closer.Close() + return ret, nil +} + +// Release releases associated resources. Release should always succeed and can +// be called multiple times without causing error. +func (snap *snapshot) Release() { + snap.db.Close() +} + +// upperBound returns the upper bound for the given prefix +func upperBound(prefix []byte) (limit []byte) { + for i := len(prefix) - 1; i >= 0; i-- { + c := prefix[i] + if c == 0xff { + continue + } + limit = make([]byte, i+1) + copy(limit, prefix) + limit[i] = c + 1 + break + } + return limit +} + +// Stat returns the internal metrics of Pebble in a text format. It's a developer +// method to read everything there is to read independent of Pebble version. +// +// The property is unused in Pebble as there's only one thing to retrieve. +func (d *Database) Stat(property string) (string, error) { + return d.db.Metrics().String(), nil +} + +// Compact flattens the underlying data store for the given key range. 
In essence, +// deleted and overwritten versions are discarded, and the data is rearranged to +// reduce the cost of operations needed to access them. +// +// A nil start is treated as a key before all keys in the data store; a nil limit +// is treated as a key after all keys in the data store. If both is nil then it +// will compact entire data store. +func (d *Database) Compact(start []byte, limit []byte) error { + // There is no special flag to represent the end of key range + // in pebble(nil in leveldb). Use an ugly hack to construct a + // large key to represent it. + // Note any prefixed database entry will be smaller than this + // flag, as for trie nodes we need the 32 byte 0xff because + // there might be a shared prefix starting with a number of + // 0xff-s, so 32 ensures than only a hash collision could touch it. + // https://github.com/cockroachdb/pebble/issues/2359#issuecomment-1443995833 + if limit == nil { + limit = bytes.Repeat([]byte{0xff}, 32) + } + return d.db.Compact(start, limit, true) // Parallelization is preferred +} + +// Path returns the path to the database directory. +func (d *Database) Path() string { + return d.fn +} + +// meter periodically retrieves internal pebble counters and reports them to +// the metrics subsystem. +func (d *Database) meter(refresh time.Duration, namespace string) { + var errc chan error + timer := time.NewTimer(refresh) + defer timer.Stop() + + // Create storage and warning log tracer for write delay. + var ( + compTimes [2]int64 + compWrites [2]int64 + compReads [2]int64 + + nWrites [2]int64 + + writeDelayTimes [2]int64 + writeDelayCounts [2]int64 + lastWriteStallReport time.Time + ) + + // Iterate ad infinitum and collect the stats + for i := 1; errc == nil; i++ { + var ( + compWrite int64 + compRead int64 + nWrite int64 + + stats = d.db.Metrics() + compTime = d.compTime.Load() + writeDelayCount = d.writeDelayCount.Load() + writeDelayTime = d.writeDelayTime.Load() + nonLevel0CompCount = int64(d.nonLevel0Comp.Load()) + level0CompCount = int64(d.level0Comp.Load()) + ) + writeDelayTimes[i%2] = writeDelayTime + writeDelayCounts[i%2] = writeDelayCount + compTimes[i%2] = compTime + + for _, levelMetrics := range stats.Levels { + nWrite += int64(levelMetrics.BytesCompacted) + nWrite += int64(levelMetrics.BytesFlushed) + compWrite += int64(levelMetrics.BytesCompacted) + compRead += int64(levelMetrics.BytesRead) + } + + nWrite += int64(stats.WAL.BytesWritten) + + compWrites[i%2] = compWrite + compReads[i%2] = compRead + nWrites[i%2] = nWrite + + if d.writeDelayNMeter != nil { + d.writeDelayNMeter.Mark(writeDelayCounts[i%2] - writeDelayCounts[(i-1)%2]) + } + if d.writeDelayMeter != nil { + d.writeDelayMeter.Mark(writeDelayTimes[i%2] - writeDelayTimes[(i-1)%2]) + } + // Print a warning log if writing has been stalled for a while. The log will + // be printed per minute to avoid overwhelming users. 
+ if d.writeStalled.Load() && writeDelayCounts[i%2] == writeDelayCounts[(i-1)%2] && + time.Now().After(lastWriteStallReport.Add(degradationWarnInterval)) { + d.log.Warn("Database compacting, degraded performance") + lastWriteStallReport = time.Now() + } + if d.compTimeMeter != nil { + d.compTimeMeter.Mark(compTimes[i%2] - compTimes[(i-1)%2]) + } + if d.compReadMeter != nil { + d.compReadMeter.Mark(compReads[i%2] - compReads[(i-1)%2]) + } + if d.compWriteMeter != nil { + d.compWriteMeter.Mark(compWrites[i%2] - compWrites[(i-1)%2]) + } + if d.diskSizeGauge != nil { + d.diskSizeGauge.Update(int64(stats.DiskSpaceUsage())) + } + if d.diskReadMeter != nil { + d.diskReadMeter.Mark(0) // pebble doesn't track non-compaction reads + } + if d.diskWriteMeter != nil { + d.diskWriteMeter.Mark(nWrites[i%2] - nWrites[(i-1)%2]) + } + // See https://github.com/cockroachdb/pebble/pull/1628#pullrequestreview-1026664054 + manuallyAllocated := stats.BlockCache.Size + int64(stats.MemTable.Size) + int64(stats.MemTable.ZombieSize) + d.manualMemAllocGauge.Update(manuallyAllocated) + d.memCompGauge.Update(stats.Flush.Count) + d.nonlevel0CompGauge.Update(nonLevel0CompCount) + d.level0CompGauge.Update(level0CompCount) + d.seekCompGauge.Update(stats.Compact.ReadCount) + + for i, level := range stats.Levels { + // Append metrics for additional layers + if i >= len(d.levelsGauge) { + d.levelsGauge = append(d.levelsGauge, metrics.NewRegisteredGauge(namespace+fmt.Sprintf("tables/level%v", i), nil)) + } + d.levelsGauge[i].Update(level.NumFiles) + } + + // Sleep a bit, then repeat the stats collection + select { + case errc = <-d.quitChan: + // Quit requesting, stop hammering the database + case <-timer.C: + timer.Reset(refresh) + // Timeout, gather a new set of stats + } + } + errc <- nil +} + +// batch is a write-only batch that commits changes to its host database +// when Write is called. A batch cannot be used concurrently. +type batch struct { + b *pebble.Batch + db *Database + size int +} + +// Put inserts the given value into the batch for later committing. +func (b *batch) Put(key, value []byte) error { + b.b.Set(key, value, nil) + b.size += len(key) + len(value) + return nil +} + +// Delete inserts the a key removal into the batch for later committing. +func (b *batch) Delete(key []byte) error { + b.b.Delete(key, nil) + b.size += len(key) + return nil +} + +// ValueSize retrieves the amount of data queued up for writing. +func (b *batch) ValueSize() int { + return b.size +} + +// Write flushes any accumulated data to disk. +func (b *batch) Write() error { + b.db.quitLock.RLock() + defer b.db.quitLock.RUnlock() + if b.db.closed { + return pebble.ErrClosed + } + return b.b.Commit(b.db.writeOptions) +} + +// Reset resets the batch for reuse. +func (b *batch) Reset() { + b.b.Reset() + b.size = 0 +} + +// Replay replays the batch contents. +func (b *batch) Replay(w ethdb.KeyValueWriter) error { + reader := b.b.Reader() + for { + kind, k, v, ok, err := reader.Next() + if !ok || err != nil { + break + } + // The (k,v) slices might be overwritten if the batch is reset/reused, + // and the receiver should copy them if they are to be retained long-term. + if kind == pebble.InternalKeyKindSet { + w.Put(k, v) + } else if kind == pebble.InternalKeyKindDelete { + w.Delete(k) + } else { + return fmt.Errorf("unhandled operation, keytype: %v", kind) + } + } + return nil +} + +// pebbleIterator is a wrapper of underlying iterator in storage engine. +// The purpose of this structure is to implement the missing APIs. 
+// +// The pebble iterator is not thread-safe. +type pebbleIterator struct { + iter *pebble.Iterator + moved bool + released bool +} + +// NewIterator creates a binary-alphabetical iterator over a subset +// of database content with a particular key prefix, starting at a particular +// initial key (or after, if it does not exist). +func (d *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator { + iter, _ := d.db.NewIter(&pebble.IterOptions{ + LowerBound: append(prefix, start...), + UpperBound: upperBound(prefix), + }) + iter.First() + return &pebbleIterator{iter: iter, moved: true, released: false} +} + +// Next moves the iterator to the next key/value pair. It returns whether the +// iterator is exhausted. +func (iter *pebbleIterator) Next() bool { + if iter.moved { + iter.moved = false + return iter.iter.Valid() + } + return iter.iter.Next() +} + +// Error returns any accumulated error. Exhausting all the key/value pairs +// is not considered to be an error. +func (iter *pebbleIterator) Error() error { + return iter.iter.Error() +} + +// Key returns the key of the current key/value pair, or nil if done. The caller +// should not modify the contents of the returned slice, and its contents may +// change on the next call to Next. +func (iter *pebbleIterator) Key() []byte { + return iter.iter.Key() +} + +// Value returns the value of the current key/value pair, or nil if done. The +// caller should not modify the contents of the returned slice, and its contents +// may change on the next call to Next. +func (iter *pebbleIterator) Value() []byte { + return iter.iter.Value() +} + +// Release releases associated resources. Release should always succeed and can +// be called multiple times without causing error. +func (iter *pebbleIterator) Release() { + if !iter.released { + iter.iter.Close() + iter.released = true + } +} diff --git a/ethdb/pebble/pebble_test.go b/ethdb/pebble/pebble_test.go new file mode 100644 index 0000000000..71de0e33f8 --- /dev/null +++ b/ethdb/pebble/pebble_test.go @@ -0,0 +1,56 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
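The prefix iteration in NewIterator above leans on upperBound: the exclusive limit is the prefix with its last non-0xff byte incremented and everything after it dropped, while a prefix of all 0xff bytes yields nil, meaning no upper limit. A standalone sketch with the helper copied verbatim so it runs on its own:

package main

import "fmt"

// upperBound mirrors the helper above: the smallest key strictly greater than
// every key carrying the given prefix, or nil if no such key exists.
func upperBound(prefix []byte) (limit []byte) {
	for i := len(prefix) - 1; i >= 0; i-- {
		c := prefix[i]
		if c == 0xff {
			continue
		}
		limit = make([]byte, i+1)
		copy(limit, prefix)
		limit[i] = c + 1
		break
	}
	return limit
}

func main() {
	fmt.Printf("%x\n", upperBound([]byte{0x12, 0xff})) // 13
	fmt.Printf("%x\n", upperBound([]byte{0xff, 0xff})) // empty: iterate to the end of the keyspace
}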
+ +package pebble + +import ( + "testing" + + "github.com/PlatONnetwork/PlatON-Go/ethdb" + "github.com/PlatONnetwork/PlatON-Go/ethdb/dbtest" + "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/vfs" +) + +func TestPebbleDB(t *testing.T) { + t.Run("DatabaseSuite", func(t *testing.T) { + dbtest.TestDatabaseSuite(t, func() ethdb.KeyValueStore { + db, err := pebble.Open("", &pebble.Options{ + FS: vfs.NewMem(), + }) + if err != nil { + t.Fatal(err) + } + return &Database{ + db: db, + } + }) + }) +} + +func BenchmarkPebbleDB(b *testing.B) { + dbtest.BenchDatabaseSuite(b, func() ethdb.KeyValueStore { + db, err := pebble.Open("", &pebble.Options{ + FS: vfs.NewMem(), + }) + if err != nil { + b.Fatal(err) + } + return &Database{ + db: db, + } + }) +} diff --git a/go.mod b/go.mod index c8e2472936..1d84bb31b5 100644 --- a/go.mod +++ b/go.mod @@ -1,37 +1,39 @@ module github.com/PlatONnetwork/PlatON-Go -go 1.18 +go 1.22 + +toolchain go1.22.0 require ( github.com/AlayaNetwork/graphql-go v1.2.1-0.20211227063951-8d66eefcb4e3 github.com/PlatONnetwork/wagon v0.6.1-0.20201026015350-67507c2a7b96 - github.com/VictoriaMetrics/fastcache v1.5.7 + github.com/VictoriaMetrics/fastcache v1.12.1 github.com/btcsuite/btcd/btcutil v1.1.0 github.com/cespare/cp v0.1.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set/v2 v2.1.0 github.com/docker/docker v24.0.7+incompatible - github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 - github.com/fatih/color v1.7.0 + github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 + github.com/fatih/color v1.13.0 github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e - github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 + github.com/fjl/memsize v0.0.2 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 + github.com/go-errors/errors v1.4.2 github.com/go-stack/stack v1.8.1 - github.com/golang/protobuf v1.5.2 - github.com/golang/snappy v0.0.4 + github.com/golang/protobuf v1.5.4 + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/google/gofuzz v1.2.0 - github.com/google/uuid v1.1.5 + github.com/google/uuid v1.3.0 github.com/gorilla/websocket v1.4.2 github.com/graph-gophers/graphql-go v1.3.0 github.com/hashicorp/go-bexpr v0.1.10 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d github.com/holiman/bloomfilter/v2 v2.0.3 - github.com/holiman/uint256 v1.2.0 - github.com/huin/goupnp v1.0.3 + github.com/holiman/uint256 v1.2.4 + github.com/huin/goupnp v1.3.0 github.com/influxdata/influxdb v1.7.6 - github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 - github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b + github.com/jackpal/go-nat-pmp v1.0.2 + github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 github.com/julienschmidt/httprouter v1.3.0 github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty v0.0.18 @@ -39,7 +41,7 @@ require ( github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 github.com/olekukonko/tablewriter v0.0.5 github.com/panjf2000/ants/v2 v2.4.1 - github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f + github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 github.com/pkg/errors v0.9.1 github.com/prometheus/tsdb v0.10.0 github.com/rjeczalik/notify v0.9.1 @@ -47,15 +49,14 @@ require ( github.com/rs/cors v1.7.0 github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible github.com/status-im/keycard-go v0.2.0 - github.com/stretchr/testify 
v1.8.0 + github.com/stretchr/testify v1.8.4 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tealeg/xlsx v1.0.5 golang.org/x/crypto v0.22.0 - golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 golang.org/x/sync v0.7.0 golang.org/x/sys v0.19.0 golang.org/x/text v0.14.0 - golang.org/x/time v0.3.0 + golang.org/x/time v0.5.0 golang.org/x/tools v0.20.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce @@ -64,47 +65,63 @@ require ( require ( github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c - github.com/deckarep/golang-set v1.7.1 + github.com/cockroachdb/pebble v1.1.0 github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/ethereum/go-ethereum v1.14.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/herumi/bls v1.37.0 github.com/influxdata/influxdb-client-go/v2 v2.12.3 github.com/karalabe/usb v0.0.2 - github.com/urfave/cli/v2 v2.24.1 + github.com/urfave/cli/v2 v2.25.7 + golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa golang.org/x/net v0.24.0 ) require ( github.com/AlayaNetwork/Alaya-Go v0.16.2 // indirect - github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/DataDog/zstd v1.4.5 // indirect + github.com/StackExchange/wmi v1.2.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cockroachdb/errors v1.11.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect - github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect + github.com/dlclark/regexp2 v1.7.0 // indirect github.com/edsrzf/mmap-go v1.0.1-0.20190108065903-904c4ced31cd // indirect - github.com/garslo/gogen v0.0.0-20230926014519-f497ca02dd4c // indirect - github.com/go-logfmt/logfmt v0.4.0 // indirect - github.com/go-ole/go-ole v1.2.4 // indirect + github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect + github.com/getsentry/sentry-go v0.18.0 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/google/go-cmp v0.5.9 // indirect + github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect + github.com/klauspost/compress v1.15.15 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mitchellh/mapstructure v1.4.1 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/naoina/go-stringutil v0.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.12.0 // indirect + github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/rogpeppe/go-internal v1.9.0 // 
indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/tklauser/go-sysconf v0.3.11 // indirect - github.com/tklauser/numcpus v0.6.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/twitchyliquid64/golang-asm v0.0.0-20190126203739-365674df15fc // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect golang.org/x/mod v0.17.0 // indirect - google.golang.org/protobuf v1.27.1 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.1 // indirect ) diff --git a/go.sum b/go.sum index d687caf890..9a1f75c85f 100644 --- a/go.sum +++ b/go.sum @@ -1,23 +1,67 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod 
h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AlayaNetwork/Alaya-Go v0.16.2 h1:tPrnCCjbDuFRR32t1tL3JN0AaE4DXtnOKYgzKrlsL9Y= github.com/AlayaNetwork/Alaya-Go v0.16.2/go.mod h1:lJ4mmeCHCMStBae9mvxdij5eMst0lHB/nLOUeT0E42g= github.com/AlayaNetwork/graphql-go v1.2.1-0.20211227063951-8d66eefcb4e3 h1:bkL6xSZwj6ls+xhKcf5FyX4t1ZV9QtE3aBe3epedzv8= github.com/AlayaNetwork/graphql-go v1.2.1-0.20211227063951-8d66eefcb4e3/go.mod h1:RHF+g+4wWGXz6ICiHRyomjpcdRi+vt7SAzXGzuXXxPk= github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-storage-blob-go v0.0.0-20180712005634-eaae161d9d5e/go.mod h1:x2mtS6O3mnMEZOJp7d7oldh8IvatBrMfReiyQ+cKgKY= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= +github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PlatONnetwork/wagon v0.6.1-0.20201026015350-67507c2a7b96 h1:BA5xEQQrv82VdaxUoAZeGi/G/UQ3z6z+eNe2rijhhpg= github.com/PlatONnetwork/wagon v0.6.1-0.20201026015350-67507c2a7b96/go.mod h1:zPWloKR2Ep7uqrhyLyE483NCxlAlQnbPsQUJXWN6bVM= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQuuDNdCbyAgw= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= +github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= +github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c h1:lnAMg3ra/Gw4AkRMxrxYs8nrprWsHowg8H9zaYsJOo4= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= @@ -35,11 +79,34 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= +github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4= +github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 
h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -48,7 +115,6 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ= github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= @@ -60,53 +126,66 @@ github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRk github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= +github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/docker v17.12.0-ce-rc1.0.20180625184442-8e610b2b55bf+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= -github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ= -github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= -github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3vYNQgR+/ZIy0RmeUDZo7Y8zeQ= -github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE= +github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= +github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= 
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.0.1-0.20190108065903-904c4ced31cd h1:5YaQigFlQK50yop9LoQwxVAic19OCuAygbETTgB/n0Q= github.com/edsrzf/mmap-go v1.0.1-0.20190108065903-904c4ced31cd/go.mod h1:W3m91qexYIu40kcj8TLXNUSTCKprH8UQ3GgH5/Xyfc0= github.com/elastic/gosigar v0.14.1/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ethereum/go-ethereum v1.14.0 h1:xRWC5NlB6g1x7vNy4HDBLuqVNbtLrc7v8S6+Uxim1LU= +github.com/ethereum/go-ethereum v1.14.0/go.mod h1:1STrq471D0BQbCX9He0hUj4bHxX2k6mt5nOQJhDNOJ8= github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0= -github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY= github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= -github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= -github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= +github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 h1:IZqZOB2fydHte3kUgxrzK5E1fW7RQGeDwE8F/ZZnUYc= github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= -github.com/garslo/gogen v0.0.0-20230926014519-f497ca02dd4c h1:40KPNY+PW1YryU4J3aX9kNAgjXIXXyfloGA732AnOQc= -github.com/garslo/gogen v0.0.0-20230926014519-f497ca02dd4c/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/getkin/kin-openapi 
v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= +github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= -github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 h1:skJKxRtNmevLqnayafdLe2AsenqRupVmzZSqrvb5caU= github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= @@ -115,33 +194,76 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.5 h1:kxhtnfFVi+rYdOALN0B3k9UT86zVJKfBimRaciULW4I= -github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -149,6 +271,8 @@ github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY4 github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= 
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -157,13 +281,14 @@ github.com/herumi/bls v1.37.0/go.mod h1:CnmR5QZ/QBnBE8Z55O+OtmUc6ICUdrOW9fwSRQwz github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= -github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= -github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= +github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag= -github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= -github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= -github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb v1.7.6 h1:8mQ7A/V+3noMGCt/P9pD09ISaiz9XvgCk303UYA3gcs= github.com/influxdata/influxdb v1.7.6/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= @@ -171,26 +296,38 @@ github.com/influxdata/influxdb-client-go/v2 v2.12.3 h1:28nRlNMRIV4QbtIUvxhWqaxn0 github.com/influxdata/influxdb-client-go/v2 v2.12.3/go.mod h1:IrrLUbCjjfkmRuaCiGQg4m2GbkaeJDcuWoxiWdQEbA0= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= -github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b h1:ZGiXF8sz7PDk6RgkP+A/SFfUD0ZR/AgG6SpRNEDKZy8= -github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b/go.mod 
h1:hQmNrgofl+IY/8L+n20H6E6PWBBTokdsv+q49j0QhsU= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/karalabe/hid v0.0.0-20170821103837-f00545f9f374/go.mod h1:YvbcH+3Wo6XPs9nkgTY3u19KXLauXW+J5nB7hEHuX0A= github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4= github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -208,29 +345,38 @@ github.com/mattn/go-colorable v0.0.8-0.20170210172801-5411d3eea597/go.mod h1:9vu 
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.0-20170209175615-281032e84ae0/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mroth/weightedrand v0.3.0 h1:8dllRVd5Y1jIJsigUpQaL5DUlV4qBiM+SF82d+cMwew= github.com/mroth/weightedrand v0.3.0/go.mod h1:3p2SIcC8al1YMzGhAIoXD+r9olo/g/cdJgAD905gyNE= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/naoina/go-stringutil v0.1.0 
h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0= @@ -258,8 +404,10 @@ github.com/panjf2000/ants/v2 v2.4.1 h1:7RtUqj5lGOw0WnZhSKDZ2zzJhaX5490ZW1sUolRXC github.com/panjf2000/ants/v2 v2.4.1/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A= github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/peterh/liner v1.0.1-0.20170902204657-a37ad3984311/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= -github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f h1:O62NGAXV0cNzBI6e7vI3zTHSTgPHsWIcS3Q4XC1/pAU= -github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM= +github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -269,11 +417,27 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg= +github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a h1:CmF68hwI0XsOQ5UwlBopMi2Ow4Pbg32akc4KIVCOm+Y= +github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= 
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic= github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -284,6 +448,8 @@ github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRr github.com/robertkrimen/otto v0.0.0-20170205013659-6a77b7cbc37d/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= github.com/robfig/cron v1.2.1-0.20190616124356-61d93e07d1be h1:cTYH+ynfj8/qqD+GzR5msO6YxoR9dQSfRIHnsKyg50E= github.com/robfig/cron v1.2.1-0.20190616124356-61d93e07d1be/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= @@ -296,6 +462,8 @@ github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= @@ -303,81 +471,151 @@ github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8 github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify 
v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tealeg/xlsx v1.0.5 h1:+f8oFmvY8Gw1iUXzPk+kz+4GpbDZPK1FhPiQRd+ypgE= github.com/tealeg/xlsx v1.0.5/go.mod h1:btRS8dz54TDnvKNosuAqxrM1QgN1udgk9O34bDCnORM= -github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= -github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= -github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= -github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/twitchyliquid64/golang-asm v0.0.0-20190126203739-365674df15fc h1:RTUQlKzoZZVG3umWNzOYeFecQLIh+dbxXvJp1zPQJTI= github.com/twitchyliquid64/golang-asm v0.0.0-20190126203739-365674df15fc/go.mod h1:NoCfSFWosfqMqmmD7hApkirIK9ozpHjxRnRxs1l413A= -github.com/urfave/cli/v2 v2.24.1 h1:/QYYr7g0EhwXEML8jO+8OYt5trPnLHS0p3mrgExJ5NU= -github.com/urfave/cli/v2 v2.24.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= +github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= +github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -387,51 +625,125 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190306220234-b354f8bf4d9e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 
h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -439,22 +751,91 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod 
h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod 
h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= @@ -466,6 +847,7 @@ gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHO gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -475,6 +857,15 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/graphql/graphiql.go b/graphql/graphiql.go index 22bb281fd1..3f317d3bab 100644 --- a/graphql/graphiql.go +++ b/graphql/graphiql.go @@ -48,7 +48,7 @@ func errorJSON(msg string) []byte { } func (h GraphiQL) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { + if r.Method != http.MethodGet { respond(w, errorJSON("only GET requests are supported"), http.StatusMethodNotAllowed) return } diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 562e1a2f0b..dade5f2f3c 100644 --- a/internal/ethapi/api.go +++ 
b/internal/ethapi/api.go @@ -879,7 +879,7 @@ func (diff *StateOverride) Apply(state *state.StateDB) error { type BlockOverrides struct { Number *hexutil.Big Difficulty *hexutil.Big - Time *hexutil.Big + Time *hexutil.Uint64 GasLimit *hexutil.Uint64 Coinbase *common.Address Random *common.Hash @@ -898,7 +898,7 @@ func (diff *BlockOverrides) Apply(blockCtx *vm.BlockContext) { blockCtx.Difficulty = diff.Difficulty.ToInt() } if diff.Time != nil { - blockCtx.Time = diff.Time.ToInt() + blockCtx.Time = uint64(*diff.Time) } if diff.GasLimit != nil { blockCtx.GasLimit = uint64(*diff.GasLimit) @@ -1184,7 +1184,9 @@ func RPCMarshalHeader(head *types.Header, ethCompatible bool) map[string]interfa if head.BaseFee != nil { result["baseFeePerGas"] = (*hexutil.Big)(head.BaseFee) } - + if head.WithdrawalsHash != nil { + result["withdrawalsRoot"] = head.WithdrawalsHash + } return result } @@ -1214,9 +1216,12 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param } } fields["transactions"] = transactions + } if ethCompatible { fields["uncles"] = make([]common.Hash, 0) + // inclTx also expands withdrawals + fields["withdrawals"] = block.Withdrawals() } return fields, nil @@ -1229,6 +1234,10 @@ func (s *BlockChainAPI) rpcMarshalHeader(header *types.Header) map[string]interf fields := RPCMarshalHeader(header, ethCompatible) if ethCompatible { fields["totalDifficulty"] = (*hexutil.Big)(new(big.Int)) + + if header.WithdrawalsHash != nil { + fields["withdrawalsRoot"] = header.WithdrawalsHash + } } return fields } @@ -1242,6 +1251,10 @@ func (s *BlockChainAPI) rpcMarshalBlock(b *types.Block, inclTx bool, fullTx bool } if inclTx && types.HttpEthCompatible { fields["totalDifficulty"] = (*hexutil.Big)(new(big.Int)) + + if b.Header().WithdrawalsHash != nil { + fields["withdrawals"] = b.Withdrawals() + } } return fields, err } diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 8ade91fa13..e0e678de63 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -90,7 +90,7 @@ type Backend interface { // eth/filters needs to be initialized from this backend type, so methods needed by // it must also be included here. 
- filters.Backend + GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) } func GetAPIs(apiBackend Backend) []rpc.API { diff --git a/internal/ethapi/transaction_args_test.go b/internal/ethapi/transaction_args_test.go index 4cd3b093f9..af94e317c2 100644 --- a/internal/ethapi/transaction_args_test.go +++ b/internal/ethapi/transaction_args_test.go @@ -285,6 +285,9 @@ func (b *backendMock) BlockByHash(ctx context.Context, hash common.Hash) (*types func (b *backendMock) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) { return nil, nil } +func (b *backendMock) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { + return nil, nil +} func (b *backendMock) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) { return nil, nil, nil } diff --git a/internal/jsre/completion.go b/internal/jsre/completion.go index 538fca298d..844a0532fd 100644 --- a/internal/jsre/completion.go +++ b/internal/jsre/completion.go @@ -17,12 +17,16 @@ package jsre import ( + "regexp" "sort" "strings" "github.com/dop251/goja" ) +// JS numerical token +var numerical = regexp.MustCompile(`^(NaN|-?((\d*\.\d+|\d+)([Ee][+-]?\d+)?|Infinity))$`) + // CompleteKeywords returns potential continuations for the given line. Since line is // evaluated, callers need to make sure that evaluating line does not have side effects. func (jsre *JSRE) CompleteKeywords(line string) []string { @@ -43,6 +47,9 @@ func getCompletions(vm *goja.Runtime, line string) (results []string) { // and "x.y" is an object, obj will reference "x.y". obj := vm.GlobalObject() for i := 0; i < len(parts)-1; i++ { + if numerical.MatchString(parts[i]) { + return nil + } v := obj.Get(parts[i]) if v == nil || goja.IsNull(v) || goja.IsUndefined(v) { return nil // No object was found @@ -67,7 +74,11 @@ func getCompletions(vm *goja.Runtime, line string) (results []string) { // Append opening parenthesis (for functions) or dot (for objects) // if the line itself is the only completion. if len(results) == 1 && results[0] == line { - obj := obj.Get(parts[len(parts)-1]) + // Accessing the property will cause it to be evaluated. + // This can cause an error, e.g. in case of web3.eth.protocolVersion + // which has been dropped from geth. Ignore the error for autocompletion + // purposes. 
+ obj := SafeGet(obj, parts[len(parts)-1]) if obj != nil { if _, isfunc := goja.AssertFunction(obj); isfunc { results[0] += "(" diff --git a/log/format.go b/log/format.go index 1763c5fd55..4b8e4543af 100644 --- a/log/format.go +++ b/log/format.go @@ -86,6 +86,7 @@ type TerminalStringer interface { // [DBUG] [May 16 20:58:45] remove route ns=haproxy addr=127.0.0.1:50002 func TerminalFormat(usecolor bool) Format { return FormatFunc(func(r *Record) []byte { + msg := escapeMessage(r.Msg) var color = 0 if usecolor { switch r.Lvl { @@ -122,19 +123,19 @@ func TerminalFormat(usecolor bool) Format { // Assemble and print the log heading if color > 0 { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s|%s]%s %s ", color, lvl, r.Time.Format(termTimeFormat), location, padding, r.Msg) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s|%s]%s %s ", color, lvl, r.Time.Format(termTimeFormat), location, padding, msg) } else { - fmt.Fprintf(b, "%s[%s|%s]%s %s ", lvl, r.Time.Format(termTimeFormat), location, padding, r.Msg) + fmt.Fprintf(b, "%s[%s|%s]%s %s ", lvl, r.Time.Format(termTimeFormat), location, padding, msg) } } else { if color > 0 { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), r.Msg) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), msg) } else { - fmt.Fprintf(b, "%s[%s] %s ", lvl, r.Time.Format(termTimeFormat), r.Msg) + fmt.Fprintf(b, "%s[%s] %s ", lvl, r.Time.Format(termTimeFormat), msg) } } // try to justify the log output for short messages - length := utf8.RuneCountInString(r.Msg) + length := utf8.RuneCountInString(msg) if len(r.Ctx) > 0 && length < termMsgJust { b.Write(bytes.Repeat([]byte{' '}, termMsgJust-length)) } @@ -167,6 +168,8 @@ func logfmt(buf *bytes.Buffer, ctx []interface{}, color int, term bool) { v := formatLogfmtValue(ctx[i+1], term) if !ok { k, v = errorKey, formatLogfmtValue(k, term) + } else { + k = escapeString(k) } // XXX: we should probably check that all of your key bytes aren't invalid @@ -471,7 +474,7 @@ func formatLogfmtBigInt(n *big.Int) string { func escapeString(s string) string { needsQuoting := false for _, r := range s { - // We quote everything below " (0x34) and above~ (0x7E), plus equal-sign + // We quote everything below " (0x22) and above~ (0x7E), plus equal-sign if r <= '"' || r > '~' || r == '=' { needsQuoting = true break @@ -482,3 +485,26 @@ func escapeString(s string) string { } return strconv.Quote(s) } + +// escapeMessage checks if the provided string needs escaping/quoting, similarly +// to escapeString. The difference is that this method is more lenient: it allows +// for spaces and linebreaks to occur without needing quoting. +func escapeMessage(s string) string { + needsQuoting := false + for _, r := range s { + // Allow CR/LF/TAB. This is to make multi-line messages work. 
+ if r == '\r' || r == '\n' || r == '\t' { + continue + } + // We quote everything below (0x20) and above~ (0x7E), + // plus equal-sign + if r < ' ' || r > '~' || r == '=' { + needsQuoting = true + break + } + } + if !needsQuoting { + return s + } + return strconv.Quote(s) +} diff --git a/log/format_test.go b/log/format_test.go index d7e0a95768..97584e72d1 100644 --- a/log/format_test.go +++ b/log/format_test.go @@ -93,3 +93,47 @@ func BenchmarkPrettyUint64Logfmt(b *testing.B) { sink = FormatLogfmtUint64(rand.Uint64()) } } + +func TestSanitation(t *testing.T) { + msg := "\u001b[1G\u001b[K\u001b[1A" + msg2 := "\u001b \u0000" + msg3 := "NiceMessage" + msg4 := "Space Message" + msg5 := "Enter\nMessage" + + for i, tt := range []struct { + msg string + want string + }{ + { + msg: msg, + want: fmt.Sprintf("] %q %q=%q\n", msg, msg, msg), + }, + { + msg: msg2, + want: fmt.Sprintf("] %q %q=%q\n", msg2, msg2, msg2), + }, + { + msg: msg3, + want: fmt.Sprintf("] %s %s=%s\n", msg3, msg3, msg3), + }, + { + msg: msg4, + want: fmt.Sprintf("] %s %q=%q\n", msg4, msg4, msg4), + }, + { + msg: msg5, + want: fmt.Sprintf("] %s %q=%q\n", msg5, msg5, msg5), + }, + } { + var ( + logger = New() + out = new(strings.Builder) + ) + logger.SetHandler(LvlFilterHandler(LvlInfo, StreamHandler(out, TerminalFormat(false)))) + logger.Info(tt.msg, tt.msg, tt.msg) + if have := out.String()[24:]; tt.want != have { + t.Fatalf("test %d: want / have: \n%v\n%v", i, tt.want, have) + } + } +} diff --git a/log/logger.go b/log/logger.go index 276d6969e2..1549e32854 100644 --- a/log/logger.go +++ b/log/logger.go @@ -46,7 +46,7 @@ func (l Lvl) AlignedString() string { } } -// Strings returns the name of a Lvl. +// String returns the name of a Lvl. func (l Lvl) String() string { switch l { case LvlTrace: diff --git a/metrics/influxdb/influxdb.go b/metrics/influxdb/influxdb.go index 8f39c7f1e1..403b611c44 100644 --- a/metrics/influxdb/influxdb.go +++ b/metrics/influxdb/influxdb.go @@ -101,6 +101,8 @@ func (r *reporter) makeClient() (err error) { func (r *reporter) run() { intervalTicker := time.NewTicker(r.interval) pingTicker := time.NewTicker(time.Second * 5) + defer intervalTicker.Stop() + defer pingTicker.Stop() for { select { diff --git a/metrics/influxdb/influxdbv2.go b/metrics/influxdb/influxdbv2.go index 1ccc2dcf43..947b932bf3 100644 --- a/metrics/influxdb/influxdbv2.go +++ b/metrics/influxdb/influxdbv2.go @@ -70,6 +70,9 @@ func (r *v2Reporter) run() { intervalTicker := time.NewTicker(r.interval) pingTicker := time.NewTicker(time.Second * 5) + defer intervalTicker.Stop() + defer pingTicker.Stop() + for { select { case <-intervalTicker.C: diff --git a/metrics/librato/client.go b/metrics/librato/client.go index eebe20521b..729c2da9a9 100644 --- a/metrics/librato/client.go +++ b/metrics/librato/client.go @@ -80,7 +80,7 @@ func (c *LibratoClient) PostMetrics(batch Batch) (err error) { return } - if req, err = http.NewRequest("POST", MetricsPostUrl, bytes.NewBuffer(js)); err != nil { + if req, err = http.NewRequest(http.MethodPost, MetricsPostUrl, bytes.NewBuffer(js)); err != nil { return } diff --git a/miner/worker.go b/miner/worker.go index 5ee29560a8..a79d8ddb49 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -882,7 +882,7 @@ func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]* // generateParams wraps various of settings for generating sealing task. 
type generateParams struct { - timestamp uint64 // The timstamp for sealing task + timestamp uint64 // The timestamp for sealing task parent *types.Block // Parent block hash, empty means the latest chain head } @@ -1089,7 +1089,7 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti return err } - block, err := w.engine.Finalize(w.chain, env.header, env.state, env.txs, env.receipts) + block, err := w.engine.Finalize(w.chain, env.header, env.state, env.txs, env.receipts, nil) if err != nil { return err diff --git a/node/config.go b/node/config.go index ebe632bd0a..00838e6bbb 100644 --- a/node/config.go +++ b/node/config.go @@ -198,6 +198,11 @@ type Config struct { // JWTSecret is the path to the hex-encoded jwt secret. JWTSecret string `toml:",omitempty"` + + // EnablePersonal enables the deprecated personal namespace. + EnablePersonal bool `toml:"-"` + + DBEngine string `toml:",omitempty"` } // IPCEndpoint resolves an IPC endpoint based on a configured value, taking into diff --git a/node/defaults.go b/node/defaults.go index 69faaca480..a106daf657 100644 --- a/node/defaults.go +++ b/node/defaults.go @@ -65,6 +65,7 @@ var DefaultConfig = Config{ NAT: nat.Any(), MaxConsensusPeers: 40, }, + DBEngine: "", } // DefaultDataDir is the default data directory to use for the databases and other diff --git a/node/node.go b/node/node.go index 42544d4835..a24c49a4af 100644 --- a/node/node.go +++ b/node/node.go @@ -402,13 +402,25 @@ func (n *Node) obtainJWTSecret(cliParam string) ([]byte, error) { // startup. It's not meant to be called at any time afterwards as it makes certain // assumptions about the state of the node. func (n *Node) startRPC() error { - if err := n.startInProc(); err != nil { + // Filter out personal api + var apis []rpc.API + for _, api := range n.rpcAPIs { + if api.Namespace == "personal" { + if n.config.EnablePersonal { + log.Warn("Deprecated personal namespace activated") + } else { + continue + } + } + apis = append(apis, api) + } + if err := n.startInProc(apis); err != nil { return err } // Configure IPC. if n.ipc.endpoint != "" { - if err := n.ipc.start(n.rpcAPIs); err != nil { + if err := n.ipc.start(apis); err != nil { return err } } @@ -536,8 +548,8 @@ func (n *Node) stopRPC() { } // startInProc registers all RPC APIs on the inproc server. 
-func (n *Node) startInProc() error { - for _, api := range n.rpcAPIs { +func (n *Node) startInProc(apis []rpc.API) error { + for _, api := range apis { if err := n.inprocHandler.RegisterName(api.Namespace, api.Service); err != nil { return err } @@ -727,7 +739,14 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, r if n.config.DataDir == "" { db = rawdb.NewMemoryDatabase() } else { - db, err = rawdb.NewLevelDBDatabase(n.ResolvePath(name), cache, handles, namespace, readonly) + db, err = rawdb.Open(rawdb.OpenOptions{ + Type: n.config.DBEngine, + Directory: n.ResolvePath(name), + Namespace: namespace, + Cache: cache, + Handles: handles, + ReadOnly: readonly, + }) } if err == nil { @@ -753,7 +772,15 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient if n.config.DataDir == "" { db = rawdb.NewMemoryDatabase() } else { - db, err = rawdb.NewLevelDBDatabaseWithFreezer(n.ResolvePath(name), cache, handles, n.ResolveAncient(name, ancient), namespace, readonly) + db, err = rawdb.Open(rawdb.OpenOptions{ + Type: n.config.DBEngine, + Directory: n.ResolvePath(name), + AncientsDirectory: n.ResolveAncient(name, ancient), + Namespace: namespace, + Cache: cache, + Handles: handles, + ReadOnly: readonly, + }) } if err == nil { diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go index c5c1dde5ea..85e069e437 100644 --- a/node/rpcstack_test.go +++ b/node/rpcstack_test.go @@ -166,7 +166,7 @@ func TestWebsocketOrigins(t *testing.T) { // TestIsWebsocket tests if an incoming websocket upgrade request is handled properly. func TestIsWebsocket(t *testing.T) { - r, _ := http.NewRequest("GET", "/", nil) + r, _ := http.NewRequest(http.MethodGet, "/", nil) assert.False(t, isWebsocket(r)) r.Header.Set("upgrade", "websocket") @@ -274,7 +274,7 @@ func rpcRequest(t *testing.T, url string, extraHeaders ...string) *http.Response // Create the request. body := bytes.NewReader([]byte(`{"jsonrpc":"2.0","id":1,"method":"rpc_modules","params":[]}`)) - req, err := http.NewRequest("POST", url, body) + req, err := http.NewRequest(http.MethodPost, url, body) if err != nil { t.Fatal("could not create http request:", err) } diff --git a/p2p/consensus_dialed.go b/p2p/consensus_dialed.go index ad7ac4a7a2..4b88d2dc07 100644 --- a/p2p/consensus_dialed.go +++ b/p2p/consensus_dialed.go @@ -111,7 +111,7 @@ func (tasks *dialedTasks) size() int { } // clear queue -/*func (tasks *dialedTasks) clear() bool { +func (tasks *dialedTasks) clear() bool { if tasks.isEmpty() { log.Info("queue is empty!") return false @@ -121,7 +121,7 @@ func (tasks *dialedTasks) size() int { } tasks.queue = nil return true -}*/ +} // whether the queue is empty func (tasks *dialedTasks) isEmpty() bool { diff --git a/p2p/dial.go b/p2p/dial.go index bccbbf4b10..32a498c2f3 100644 --- a/p2p/dial.go +++ b/p2p/dial.go @@ -108,6 +108,7 @@ type dialScheduler struct { addconsensus chan *enode.Node removeconsensus chan *enode.Node + clearConsensus chan struct{} // Everything below here belongs to loop and // should only be accessed by code on the loop goroutine. 
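The clearConsensus channel introduced in the hunk above is wired up in the hunks that follow: closeConsensusDial hands a request to the scheduler's loop goroutine, and the loop is the only place that actually clears the consensus pool, with ctx.Done() as an escape hatch so a caller cannot block after shutdown. A minimal, self-contained sketch of that pattern (the scheduler, pool and names below are illustrative stand-ins, not the real PlatON-Go types):

package main

import (
	"context"
	"fmt"
	"time"
)

// scheduler is a stand-in for dialScheduler: one loop goroutine owns the
// pool; other goroutines only talk to it over channels.
type scheduler struct {
	ctx   context.Context
	clear chan struct{}
	pool  []string // stand-in for the consensus dial task pool
}

// requestClear mirrors closeConsensusDial: deliver the request to the loop,
// or give up if the scheduler context is already cancelled.
func (s *scheduler) requestClear() {
	select {
	case s.clear <- struct{}{}:
	case <-s.ctx.Done():
	}
}

func (s *scheduler) loop() {
	for {
		select {
		case <-s.clear:
			s.pool = nil // only the loop goroutine mutates the pool
			fmt.Println("consensus pool cleared")
		case <-s.ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s := &scheduler{ctx: ctx, clear: make(chan struct{}), pool: []string{"a", "b"}}
	go s.loop()
	s.requestClear()
	time.Sleep(10 * time.Millisecond) // give the loop time to log
}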
@@ -185,6 +186,7 @@ func newDialScheduler(config dialConfig, it enode.Iterator, setupFunc dialSetupF remPeerCh: make(chan *conn), addconsensus: make(chan *enode.Node), removeconsensus: make(chan *enode.Node), + clearConsensus: make(chan struct{}), updateConsensusPeersCh: make(chan int), consensusPool: NewDialedTasks(config.MaxConsensusPeers*2, nil), } @@ -239,6 +241,13 @@ func (d *dialScheduler) removeConsensus(n *enode.Node) { } } +func (d *dialScheduler) closeConsensusDial() { + select { + case d.clearConsensus <- struct{}{}: + case <-d.ctx.Done(): + } +} + func (d *dialScheduler) removeConsensusFromQueue(n *enode.Node) { d.history.remove(string(n.ID().Bytes())) } @@ -346,6 +355,8 @@ loop: d.consensusPool.AddTask(newDialTask(node, dynDialedConn|consensusDialedConn)) case node := <-d.removeconsensus: d.consensusPool.RemoveTask(node.ID()) + case <-d.clearConsensus: + d.consensusPool.clear() case num := <-d.updateConsensusPeersCh: d.log.Debug("update added consensus peers num", "num", num) d.consensusPeers = num diff --git a/p2p/discover/v5wire/msg.go b/p2p/discover/v5wire/msg.go index 225fda7f34..84f2caa6d5 100644 --- a/p2p/discover/v5wire/msg.go +++ b/p2p/discover/v5wire/msg.go @@ -84,7 +84,7 @@ type ( ReqID []byte ENRSeq uint64 ToIP net.IP // These fields should mirror the UDP envelope address of the ping - ToPort uint16 // packet, which provides a way to discover the the external address (after NAT). + ToPort uint16 // packet, which provides a way to discover the external address (after NAT). } // FINDNODE is a query for nodes in the given bucket. diff --git a/p2p/dnsdisc/client_test.go b/p2p/dnsdisc/client_test.go index 44d62d6e75..0b94d32502 100644 --- a/p2p/dnsdisc/client_test.go +++ b/p2p/dnsdisc/client_test.go @@ -34,24 +34,25 @@ import ( "github.com/davecgh/go-spew/spew" ) -const ( - signingKeySeed = 0x111111 - nodesSeed1 = 0x2945237 - nodesSeed2 = 0x4567299 -) +var signingKeyForTesting, _ = crypto.ToECDSA(hexutil.MustDecode("0xdc599867fc513f8f5e2c2c9c489cde5e71362d1d9ec6e693e0de063236ed1240")) func TestClientSyncTree(t *testing.T) { - t.Skip("TestClientSyncTree") //todo test + nodes := []string{ + "enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA", + "enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI", + "enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o", + } + r := mapResolver{ "n": "enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA", "C7HRFPF3BLGF3YR4DY5KX3SMBE.n": "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org", "JWXYDBPXYWG6FX3GMDIBFA6CJ4.n": "enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24", - "2XS2367YHAXJFGLZHVAWLQD4ZY.n": "enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA", - "H4FHT4B454P6UXFD7JCYQ5PWDY.n": "enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI", - "MHTDO6TMUBRIA2XWG5LUDACK24.n": 
"enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o", + "2XS2367YHAXJFGLZHVAWLQD4ZY.n": nodes[0], + "H4FHT4B454P6UXFD7JCYQ5PWDY.n": nodes[1], + "MHTDO6TMUBRIA2XWG5LUDACK24.n": nodes[2], } var ( - wantNodes = testNodes(0x29452, 3) + wantNodes = sortByID(parseNodes(nodes)) wantLinks = []string{"enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org"} wantSeq = uint(1) ) @@ -61,7 +62,7 @@ func TestClientSyncTree(t *testing.T) { if err != nil { t.Fatal("sync error:", err) } - if !reflect.DeepEqual(sortByID(stree.Nodes()), sortByID(wantNodes)) { + if !reflect.DeepEqual(sortByID(stree.Nodes()), wantNodes) { t.Errorf("wrong nodes in synced tree:\nhave %v\nwant %v", spew.Sdump(stree.Nodes()), spew.Sdump(wantNodes)) } if !reflect.DeepEqual(stree.Links(), wantLinks) { @@ -81,7 +82,7 @@ func TestClientSyncTreeBadNode(t *testing.T) { // tree, _ := MakeTree(3, nil, []string{"enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org"}) // tree.entries[badHash] = &b // tree.root.eroot = badHash - // url, _ := tree.Sign(testKey(signingKeySeed), "n") + // url, _ := tree.Sign(signingKeyForTesting, "n") // fmt.Println(url) // fmt.Printf("%#v\n", tree.ToTXT("n")) @@ -100,9 +101,13 @@ func TestClientSyncTreeBadNode(t *testing.T) { // This test checks that randomIterator finds all entries. func TestIterator(t *testing.T) { - nodes := testNodes(nodesSeed1, 30) - tree, url := makeTestTree("n", nodes, nil) - r := mapResolver(tree.ToTXT("n")) + var ( + keys = testKeys(30) + nodes = testNodes(keys) + tree, url = makeTestTree("n", nodes, nil) + r = mapResolver(tree.ToTXT("n")) + ) + c := NewClient(Config{ Resolver: r, Logger: testlog.Logger(t, log.LvlTrace), @@ -115,6 +120,7 @@ func TestIterator(t *testing.T) { checkIterator(t, it, nodes) } + func TestIteratorCloseWithoutNext(t *testing.T) { tree1, url1 := makeTestTree("t1", nil, nil) c := NewClient(Config{Resolver: newMapResolver(tree1.ToTXT("t1"))}) @@ -132,8 +138,12 @@ func TestIteratorCloseWithoutNext(t *testing.T) { // This test checks if closing randomIterator races. func TestIteratorClose(t *testing.T) { - nodes := testNodes(nodesSeed1, 500) - tree1, url1 := makeTestTree("t1", nodes, nil) + var ( + keys = testKeys(500) + nodes = testNodes(keys) + tree1, url1 = makeTestTree("t1", nodes, nil) + ) + c := NewClient(Config{Resolver: newMapResolver(tree1.ToTXT("t1"))}) it, err := c.NewIterator(url1) if err != nil { @@ -155,9 +165,13 @@ func TestIteratorClose(t *testing.T) { // This test checks that randomIterator traverses linked trees as well as explicitly added trees. func TestIteratorLinks(t *testing.T) { - nodes := testNodes(nodesSeed1, 40) - tree1, url1 := makeTestTree("t1", nodes[:10], nil) - tree2, url2 := makeTestTree("t2", nodes[10:], []string{url1}) + var ( + keys = testKeys(40) + nodes = testNodes(keys) + tree1, url1 = makeTestTree("t1", nodes[:10], nil) + tree2, url2 = makeTestTree("t2", nodes[10:], []string{url1}) + ) + c := NewClient(Config{ Resolver: newMapResolver(tree1.ToTXT("t1"), tree2.ToTXT("t2")), Logger: testlog.Logger(t, log.LvlTrace), @@ -174,10 +188,10 @@ func TestIteratorLinks(t *testing.T) { // This test verifies that randomIterator re-checks the root of the tree to catch // updates to nodes. 
func TestIteratorNodeUpdates(t *testing.T) { - t.Skip() var ( clock = new(mclock.Simulated) - nodes = testNodes(nodesSeed1, 30) + keys = testKeys(30) + nodes = testNodes(keys) resolver = newMapResolver() c = NewClient(Config{ Resolver: resolver, @@ -198,7 +212,7 @@ func TestIteratorNodeUpdates(t *testing.T) { checkIterator(t, it, nodes[:25]) // Ensure RandomNode returns the new nodes after the tree is updated. - updateSomeNodes(nodesSeed1, nodes) + updateSomeNodes(keys, nodes) tree2, _ := makeTestTree("n", nodes, nil) resolver.clear() resolver.add(tree2.ToTXT("n")) @@ -212,10 +226,10 @@ func TestIteratorNodeUpdates(t *testing.T) { // requests have failed. The test is just like TestIteratorNodeUpdates, but // without advancing the clock by recheckInterval after the tree update. func TestIteratorRootRecheckOnFail(t *testing.T) { - t.Skip() var ( clock = new(mclock.Simulated) - nodes = testNodes(nodesSeed1, 30) + keys = testKeys(30) + nodes = testNodes(keys) resolver = newMapResolver() c = NewClient(Config{ Resolver: resolver, @@ -239,7 +253,7 @@ func TestIteratorRootRecheckOnFail(t *testing.T) { checkIterator(t, it, nodes[:25]) // Ensure RandomNode returns the new nodes after the tree is updated. - updateSomeNodes(nodesSeed1, nodes) + updateSomeNodes(keys, nodes) tree2, _ := makeTestTree("n", nodes, nil) resolver.clear() resolver.add(tree2.ToTXT("n")) @@ -252,7 +266,8 @@ func TestIteratorRootRecheckOnFail(t *testing.T) { func TestIteratorEmptyTree(t *testing.T) { var ( clock = new(mclock.Simulated) - nodes = testNodes(nodesSeed1, 1) + keys = testKeys(1) + nodes = testNodes(keys) resolver = newMapResolver() c = NewClient(Config{ Resolver: resolver, @@ -267,7 +282,7 @@ func TestIteratorEmptyTree(t *testing.T) { resolver.add(tree1.ToTXT("n")) // Start the iterator. - node := make(chan *enode.Node) + node := make(chan *enode.Node, 1) it, err := c.NewIterator(url) if err != nil { t.Fatal(err) @@ -296,8 +311,7 @@ func TestIteratorEmptyTree(t *testing.T) { } // updateSomeNodes applies ENR updates to some of the given nodes. -func updateSomeNodes(keySeed int64, nodes []*enode.Node) { - keys := testKeys(nodesSeed1, len(nodes)) +func updateSomeNodes(keys []*ecdsa.PrivateKey, nodes []*enode.Node) { for i, n := range nodes[:len(nodes)/2] { r := n.Record() r.Set(enr.IP{127, 0, 0, 1}) @@ -311,10 +325,10 @@ func updateSomeNodes(keySeed int64, nodes []*enode.Node) { // This test verifies that randomIterator re-checks the root of the tree to catch // updates to links. func TestIteratorLinkUpdates(t *testing.T) { - t.Skip("TestIteratorLinkUpdates") //todo test var ( clock = new(mclock.Simulated) - nodes = testNodes(nodesSeed1, 30) + keys = testKeys(30) + nodes = testNodes(keys) resolver = newMapResolver() c = NewClient(Config{ Resolver: resolver, @@ -387,7 +401,7 @@ func makeTestTree(domain string, nodes []*enode.Node, links []string) (*Tree, st if err != nil { panic(err) } - url, err := tree.Sign(testKey(signingKeySeed), domain) + url, err := tree.Sign(signingKeyForTesting, domain) if err != nil { panic(err) } @@ -395,11 +409,10 @@ func makeTestTree(domain string, nodes []*enode.Node, links []string) (*Tree, st } // testKeys creates deterministic private keys for testing. 
-func testKeys(seed int64, n int) []*ecdsa.PrivateKey { - rand := rand.New(rand.NewSource(seed)) +func testKeys(n int) []*ecdsa.PrivateKey { keys := make([]*ecdsa.PrivateKey, n) for i := 0; i < n; i++ { - key, err := ecdsa.GenerateKey(crypto.S256(), rand) + key, err := crypto.GenerateKey() if err != nil { panic("can't generate key: " + err.Error()) } @@ -408,13 +421,8 @@ func testKeys(seed int64, n int) []*ecdsa.PrivateKey { return keys } -func testKey(seed int64) *ecdsa.PrivateKey { - return testKeys(seed, 1)[0] -} - -func testNodes(seed int64, n int) []*enode.Node { - keys := testKeys(seed, n) - nodes := make([]*enode.Node, n) +func testNodes(keys []*ecdsa.PrivateKey) []*enode.Node { + nodes := make([]*enode.Node, len(keys)) for i, key := range keys { record := new(enr.Record) record.SetSeq(uint64(i)) @@ -428,10 +436,6 @@ func testNodes(seed int64, n int) []*enode.Node { return nodes } -func testNode(seed int64) *enode.Node { - return testNodes(seed, 1)[0] -} - type mapResolver map[string]string func newMapResolver(maps ...map[string]string) mapResolver { @@ -460,3 +464,15 @@ func (mr mapResolver) LookupTXT(ctx context.Context, name string) ([]string, err } return nil, errors.New("not found") } + +func parseNodes(rec []string) []*enode.Node { + var ns []*enode.Node + for _, r := range rec { + var n enode.Node + if err := n.UnmarshalText([]byte(r)); err != nil { + panic(err) + } + ns = append(ns, &n) + } + return ns +} diff --git a/p2p/dnsdisc/tree_test.go b/p2p/dnsdisc/tree_test.go index 92c9e5c084..22cabbdc97 100644 --- a/p2p/dnsdisc/tree_test.go +++ b/p2p/dnsdisc/tree_test.go @@ -61,8 +61,9 @@ func TestParseRoot(t *testing.T) { } func TestParseEntry(t *testing.T) { - t.Skip("TestParseEntry") //todo test - testkey := testKey(signingKeySeed) + testENRs := []string{"enr:-HW4QES8QIeXTYlDzbfr1WEzE-XKY4f8gJFJzjJL-9D7TC9lJb4Z3JPRRz1lP4pL_N_QpT6rGQjAU9Apnc-C1iMP36OAgmlkgnY0iXNlY3AyNTZrMaED5IdwfMxdmR8W37HqSFdQLjDkIwBd4Q_MjxgZifgKSdM"} + testNodes := parseNodes(testENRs) + tests := []struct { input string e entry @@ -92,7 +93,11 @@ func TestParseEntry(t *testing.T) { // Links { input: "enrtree://AKPYQIUQIL7PSIACI32J7FGZW56E5FKHEFCCOFHILBIMW3M6LWXS2@nodes.example.org", - e: &linkEntry{"AKPYQIUQIL7PSIACI32J7FGZW56E5FKHEFCCOFHILBIMW3M6LWXS2@nodes.example.org", "nodes.example.org", &testkey.PublicKey}, + e: &linkEntry{ + str: "AKPYQIUQIL7PSIACI32J7FGZW56E5FKHEFCCOFHILBIMW3M6LWXS2@nodes.example.org", + domain: "nodes.example.org", + pubkey: &signingKeyForTesting.PublicKey, + }, }, { input: "enrtree://nodes.example.org", @@ -108,8 +113,8 @@ func TestParseEntry(t *testing.T) { }, // ENRs { - input: "enr:-HW4QES8QIeXTYlDzbfr1WEzE-XKY4f8gJFJzjJL-9D7TC9lJb4Z3JPRRz1lP4pL_N_QpT6rGQjAU9Apnc-C1iMP36OAgmlkgnY0iXNlY3AyNTZrMaED5IdwfMxdmR8W37HqSFdQLjDkIwBd4Q_MjxgZifgKSdM", - e: &enrEntry{node: testNode(nodesSeed1)}, + input: testENRs[0], + e: &enrEntry{node: testNodes[0]}, }, { input: "enr:-HW4QLZHjM4vZXkbp-5xJoHsKSbE7W39FPC8283X-y8oHcHPTnDDlIlzL5ArvDUlHZVDPgmFASrh7cWgLOLxj4wprRkHgmlkgnY0iXNlY3AyNTZrMaEC3t2jLMhDpCDX5mbSEwDn4L3iUfyXzoO8G28XvjGRkrAg=", @@ -133,7 +138,8 @@ func TestParseEntry(t *testing.T) { } func TestMakeTree(t *testing.T) { - nodes := testNodes(nodesSeed2, 50) + keys := testKeys(50) + nodes := testNodes(keys) tree, err := MakeTree(2, nodes, nil) if err != nil { t.Fatal(err) diff --git a/p2p/enode/nodedb_test.go b/p2p/enode/nodedb_test.go index b2d34d912e..7de34910d7 100644 --- a/p2p/enode/nodedb_test.go +++ b/p2p/enode/nodedb_test.go @@ -181,7 +181,7 @@ var nodeDBSeedQueryNodes = []struct { ), 
pong: time.Now().Add(-3 * time.Hour), }, - // This one shouldn't be in in the result set because its + // This one shouldn't be in the result set because its // nodeID is the local node's ID. { node: NewV4( diff --git a/p2p/server.go b/p2p/server.go index 79cf364571..1ce06becf9 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -399,6 +399,11 @@ func (srv *Server) RemoveConsensusPeer(node *enode.Node) { } } +func (srv *Server) CloseConsensusDial() { + srv.dialsched.closeConsensusDial() + srv.log.Info("Close consensus Dial") +} + // AddTrustedPeer adds the given node to a reserved whitelist which allows the // node to always connect, even if the slot are full. func (srv *Server) AddTrustedPeer(node *enode.Node) { diff --git a/p2p/simulations/http.go b/p2p/simulations/http.go index 77e66dc778..cb4b0bb374 100644 --- a/p2p/simulations/http.go +++ b/p2p/simulations/http.go @@ -104,7 +104,7 @@ type SubscribeOpts struct { // nodes and connections and filtering message events func (c *Client) SubscribeNetwork(events chan *Event, opts SubscribeOpts) (event.Subscription, error) { url := fmt.Sprintf("%s/events?current=%t&filter=%s", c.URL, opts.Current, opts.Filter) - req, err := http.NewRequest("GET", url, nil) + req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { return nil, err } @@ -217,18 +217,18 @@ func (c *Client) RPCClient(ctx context.Context, nodeID string) (*rpc.Client, err // Get performs a HTTP GET request decoding the resulting JSON response // into "out" func (c *Client) Get(path string, out interface{}) error { - return c.Send("GET", path, nil, out) + return c.Send(http.MethodGet, path, nil, out) } // Post performs a HTTP POST request sending "in" as the JSON body and // decoding the resulting JSON response into "out" func (c *Client) Post(path string, in, out interface{}) error { - return c.Send("POST", path, in, out) + return c.Send(http.MethodPost, path, in, out) } // Delete performs a HTTP DELETE request func (c *Client) Delete(path string) error { - return c.Send("DELETE", path, nil, nil) + return c.Send(http.MethodDelete, path, nil, nil) } // Send performs a HTTP request, sending "in" as the JSON request body and diff --git a/params/config.go b/params/config.go index 8777926f1c..7f47d968e1 100644 --- a/params/config.go +++ b/params/config.go @@ -381,6 +381,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi return newCompatError("hubble fork block", c.HubbleBlock, newcfg.HubbleBlock) } + if isForkIncompatible(c.PauliBlock, newcfg.PauliBlock, head) { + return newCompatError("pauli fork block", c.PauliBlock, newcfg.PauliBlock) + } return nil } diff --git a/rpc/http.go b/rpc/http.go index b6cd1301cb..46b57ed5b1 100644 --- a/rpc/http.go +++ b/rpc/http.go @@ -207,7 +207,7 @@ func (hc *httpConn) doRequest(ctx context.Context, msg interface{}) (io.ReadClos if err != nil { return nil, err } - req, err := http.NewRequestWithContext(ctx, "POST", hc.url, io.NopCloser(bytes.NewReader(body))) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, hc.url, io.NopCloser(bytes.NewReader(body))) if err != nil { return nil, err } diff --git a/rpc/ipc_unix.go b/rpc/ipc_unix.go index efb2e4f946..dc1cb4a268 100644 --- a/rpc/ipc_unix.go +++ b/rpc/ipc_unix.go @@ -31,8 +31,9 @@ import ( // ipcListen will create a Unix socket on the given endpoint. func ipcListen(endpoint string) (net.Listener, error) { - if len(endpoint) > int(max_path_size) { - log.Warn(fmt.Sprintf("The ipc endpoint is longer than %d characters. 
", max_path_size), + // account for null-terminator too + if len(endpoint)+1 > int(max_path_size) { + log.Warn(fmt.Sprintf("The ipc endpoint is longer than %d characters. ", max_path_size-1), "endpoint", endpoint) } diff --git a/signer/core/apitypes/types.go b/signer/core/apitypes/types.go index 95d8678be0..b0238cc2a6 100644 --- a/signer/core/apitypes/types.go +++ b/signer/core/apitypes/types.go @@ -27,8 +27,6 @@ import ( "sort" "strconv" "strings" - "unicode" - "unicode/utf8" "github.com/PlatONnetwork/PlatON-Go/common/math" "github.com/PlatONnetwork/PlatON-Go/crypto" @@ -39,7 +37,7 @@ import ( "github.com/PlatONnetwork/PlatON-Go/core/types" ) -var typedDataReferenceTypeRegexp = regexp.MustCompile(`^[A-Z](\w*)(\[\])?$`) +var typedDataReferenceTypeRegexp = regexp.MustCompile(`^[A-Za-z](\w*)(\[\])?$`) type ValidationInfo struct { Typ string `json:"type"` @@ -225,15 +223,6 @@ func (t *Type) typeName() string { return t.Type } -func (t *Type) isReferenceType() bool { - if len(t.Type) == 0 { - return false - } - // Reference types must have a leading uppercase character - r, _ := utf8.DecodeRuneInString(t.Type) - return unicode.IsUpper(r) -} - type Types map[string][]Type type TypePriority struct { @@ -368,8 +357,8 @@ func (typedData *TypedData) EncodeData(primaryType string, data map[string]inter encType := field.Type encValue := data[field.Name] if encType[len(encType)-1:] == "]" { - arrayValue, ok := encValue.([]interface{}) - if !ok { + arrayValue, err := convertDataToSlice(encValue) + if err != nil { return nil, dataMismatchError(encType, encValue) } @@ -419,6 +408,14 @@ func (typedData *TypedData) EncodeData(primaryType string, data map[string]inter // Attempt to parse bytes in different formats: byte array, hex string, hexutil.Bytes. func parseBytes(encType interface{}) ([]byte, bool) { + // Handle array types. 
+ val := reflect.ValueOf(encType) + if val.Kind() == reflect.Array && val.Type().Elem().Kind() == reflect.Uint8 { + v := reflect.MakeSlice(reflect.TypeOf([]byte{}), val.Len(), val.Len()) + reflect.Copy(v, val) + return v.Bytes(), true + } + switch v := encType.(type) { case []byte: return v, true @@ -459,6 +456,8 @@ func parseInteger(encType string, encValue interface{}) (*big.Int, error) { switch v := encValue.(type) { case *math.HexOrDecimal256: b = (*big.Int)(v) + case *big.Int: + b = v case string: var hexIntValue math.HexOrDecimal256 if err := hexIntValue.UnmarshalText([]byte(v)); err != nil { @@ -491,13 +490,23 @@ func parseInteger(encType string, encValue interface{}) (*big.Int, error) { func (typedData *TypedData) EncodePrimitiveValue(encType string, encValue interface{}, depth int) ([]byte, error) { switch encType { case "address": - stringValue, ok := encValue.(string) - if !ok || !common.IsHexAddress(stringValue) { - return nil, dataMismatchError(encType, encValue) - } retval := make([]byte, 32) - copy(retval[12:], common.HexToAddress(stringValue).Bytes()) - return retval, nil + switch val := encValue.(type) { + case string: + if common.IsHexAddress(val) { + copy(retval[12:], common.HexToAddress(val).Bytes()) + return retval, nil + } + case []byte: + if len(val) == 20 { + copy(retval[12:], val) + return retval, nil + } + case [20]byte: + copy(retval[12:], val[:]) + return retval, nil + } + return nil, dataMismatchError(encType, encValue) case "bool": boolValue, ok := encValue.(bool) if !ok { @@ -554,6 +563,19 @@ func dataMismatchError(encType string, encValue interface{}) error { return fmt.Errorf("provided data '%v' doesn't match type '%s'", encValue, encType) } +func convertDataToSlice(encValue interface{}) ([]interface{}, error) { + var outEncValue []interface{} + rv := reflect.ValueOf(encValue) + if rv.Kind() == reflect.Slice { + for i := 0; i < rv.Len(); i++ { + outEncValue = append(outEncValue, rv.Index(i).Interface()) + } + } else { + return outEncValue, fmt.Errorf("provided data '%v' is not slice", encValue) + } + return outEncValue, nil +} + // validate makes sure the types are sound func (typedData *TypedData) validate() error { if err := typedData.Types.validate(); err != nil { @@ -613,7 +635,7 @@ func (typedData *TypedData) formatData(primaryType string, data map[string]inter Typ: field.Type, } if field.isArray() { - arrayValue, _ := encValue.([]interface{}) + arrayValue, _ := convertDataToSlice(encValue) parsedType := field.typeName() for _, v := range arrayValue { if typedData.Types[parsedType] != nil { @@ -699,15 +721,15 @@ func (t Types) validate() error { if typeKey == typeObj.Type { return fmt.Errorf("type %q cannot reference itself", typeObj.Type) } - if typeObj.isReferenceType() { - if _, exist := t[typeObj.typeName()]; !exist { - return fmt.Errorf("reference type %q is undefined", typeObj.Type) - } - if !typedDataReferenceTypeRegexp.MatchString(typeObj.Type) { - return fmt.Errorf("unknown reference type %q", typeObj.Type) - } - } else if !isPrimitiveTypeValid(typeObj.Type) { - return fmt.Errorf("unknown type %q", typeObj.Type) + if isPrimitiveTypeValid(typeObj.Type) { + continue + } + // Must be reference type + if _, exist := t[typeObj.typeName()]; !exist { + return fmt.Errorf("reference type %q is undefined", typeObj.Type) + } + if !typedDataReferenceTypeRegexp.MatchString(typeObj.Type) { + return fmt.Errorf("unknown reference type %q", typeObj.Type) } } } diff --git a/signer/core/signed_data_test.go b/signer/core/signed_data_test.go index 
57cafa411e..dcfddf7e0a 100644 --- a/signer/core/signed_data_test.go +++ b/signer/core/signed_data_test.go @@ -811,3 +811,153 @@ func TestComplexTypedData(t *testing.T) { t.Fatalf("Error, got %x, wanted %x", sighash, expSigHash) } } + +var complexTypedDataLCRefType = ` +{ + "types": { + "EIP712Domain": [ + { + "name": "chainId", + "type": "uint256" + }, + { + "name": "name", + "type": "string" + }, + { + "name": "verifyingContract", + "type": "address" + }, + { + "name": "version", + "type": "string" + } + ], + "Action": [ + { + "name": "action", + "type": "string" + }, + { + "name": "params", + "type": "string" + } + ], + "cCell": [ + { + "name": "capacity", + "type": "string" + }, + { + "name": "lock", + "type": "string" + }, + { + "name": "type", + "type": "string" + }, + { + "name": "data", + "type": "string" + }, + { + "name": "extraData", + "type": "string" + } + ], + "Transaction": [ + { + "name": "DAS_MESSAGE", + "type": "string" + }, + { + "name": "inputsCapacity", + "type": "string" + }, + { + "name": "outputsCapacity", + "type": "string" + }, + { + "name": "fee", + "type": "string" + }, + { + "name": "action", + "type": "Action" + }, + { + "name": "inputs", + "type": "cCell[]" + }, + { + "name": "outputs", + "type": "cCell[]" + }, + { + "name": "digest", + "type": "bytes32" + } + ] + }, + "primaryType": "Transaction", + "domain": { + "chainId": "56", + "name": "da.systems", + "verifyingContract": "0x0000000000000000000000000000000020210722", + "version": "1" + }, + "message": { + "DAS_MESSAGE": "SELL mobcion.bit FOR 100000 CKB", + "inputsCapacity": "1216.9999 CKB", + "outputsCapacity": "1216.9998 CKB", + "fee": "0.0001 CKB", + "digest": "0x53a6c0f19ec281604607f5d6817e442082ad1882bef0df64d84d3810dae561eb", + "action": { + "action": "start_account_sale", + "params": "0x00" + }, + "inputs": [ + { + "capacity": "218 CKB", + "lock": "das-lock,0x01,0x051c152f77f8efa9c7c6d181cc97ee67c165c506...", + "type": "account-cell-type,0x01,0x", + "data": "{ account: mobcion.bit, expired_at: 1670913958 }", + "extraData": "{ status: 0, records_hash: 0x55478d76900611eb079b22088081124ed6c8bae21a05dd1a0d197efcc7c114ce }" + } + ], + "outputs": [ + { + "capacity": "218 CKB", + "lock": "das-lock,0x01,0x051c152f77f8efa9c7c6d181cc97ee67c165c506...", + "type": "account-cell-type,0x01,0x", + "data": "{ account: mobcion.bit, expired_at: 1670913958 }", + "extraData": "{ status: 1, records_hash: 0x55478d76900611eb079b22088081124ed6c8bae21a05dd1a0d197efcc7c114ce }" + }, + { + "capacity": "201 CKB", + "lock": "das-lock,0x01,0x051c152f77f8efa9c7c6d181cc97ee67c165c506...", + "type": "account-sale-cell-type,0x01,0x", + "data": "0x1209460ef3cb5f1c68ed2c43a3e020eec2d9de6e...", + "extraData": "" + } + ] + } +} +` + +func TestComplexTypedDataWithLowercaseReftype(t *testing.T) { + var td apitypes.TypedData + err := json.Unmarshal([]byte(complexTypedDataLCRefType), &td) + if err != nil { + t.Fatalf("unmarshalling failed '%v'", err) + } + _, sighash, err := sign(td) + if err != nil { + t.Fatal(err) + } + expSigHash := common.FromHex("0x49191f910874f0148597204d9076af128d4694a7c4b714f1ccff330b87207bff") + if !bytes.Equal(expSigHash, sighash) { + t.Fatalf("Error, got %x, wanted %x", sighash, expSigHash) + } +} diff --git a/signer/core/uiapi.go b/signer/core/uiapi.go index cff598a313..43a7c90794 100644 --- a/signer/core/uiapi.go +++ b/signer/core/uiapi.go @@ -157,7 +157,6 @@ func (s *UIServerAPI) ChainId() math.HexOrDecimal64 { } // SetChainId sets the chain id to use when signing transactions. 
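Stepping back to the apitypes changes above: EncodeData now accepts native Go values (any slice via convertDataToSlice, *big.Int integers, 20-byte addresses), and reference type names no longer need an uppercase first letter, which is what the cCell fixture exercises. A hedged sketch under those assumptions, with hypothetical field names (imports of signer/core/apitypes, common and math/big assumed):

msg := apitypes.TypedDataMessage{
    "wallet": common.HexToAddress("0x0000000000000000000000000000000020210722").Bytes(), // 20-byte slice hits the new []byte case
    "amount": big.NewInt(42),          // *big.Int is now handled by parseInteger
    "tags":   []string{"a", "b", "c"}, // any Go slice is flattened by convertDataToSlice
}
_ = msg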
-// Example call to set Ropsten: // {"jsonrpc":"2.0","method":"clef_setChainId","params":["3"], "id":8} func (s *UIServerAPI) SetChainId(id math.HexOrDecimal64) math.HexOrDecimal64 { s.extApi.chainID = new(big.Int).SetUint64(uint64(id)) diff --git a/tests/solidity/truffle-config.js b/tests/solidity/truffle-config.js index a1b0ef0528..1e6fd2fb4e 100644 --- a/tests/solidity/truffle-config.js +++ b/tests/solidity/truffle-config.js @@ -57,18 +57,6 @@ // // from:
, // Account to send txs from (default: accounts[0]) // // websockets: true // Enable EventEmitter interface for web3 (default: false) // // }, -// -// // Useful for deploying to a public network. -// // NB: It's important to wrap the provider as a function. -// // ropsten: { -// // provider: () => new HDWalletProvider(mnemonic, `https://ropsten.infura.io/${infuraKey}`), -// // network_id: 3, // Ropsten's id -// // gas: 5500000, // Ropsten has a lower block limit than mainnet -// // confirmations: 2, // # of confs to wait between deployments. (default: 0) -// // timeoutBlocks: 200, // # of blocks before a deployment times out (minimum/default: 50) -// // skipDryRun: true // Skip dry run before migrations? (default: false for public nets ) -// // }, -// // // Useful for private networks // // private: { // // provider: () => new HDWalletProvider(mnemonic, `https://network.io`), diff --git a/trie/database.go b/trie/database.go index b774e956a1..65cfa3c3f7 100644 --- a/trie/database.go +++ b/trie/database.go @@ -73,7 +73,7 @@ var ( // behind this split design is to provide read access to RPC handlers and sync // servers even while the trie is executing expensive garbage collection. type Database struct { - diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes + diskdb ethdb.Database // Persistent storage for matured trie nodes freshNodes map[common.Hash]struct{} @@ -174,7 +174,7 @@ func (n *cachedNode) rlp() []byte { // or by regenerating it from the rlp encoded blob. func (n *cachedNode) obj(hash common.Hash) node { if node, ok := n.node.(rawNode); ok { - // The raw-blob format nodes are loaded from either from + // The raw-blob format nodes are loaded either from the // clean cache or the database, they are all in their own // copy and safe to use unsafe decoder. return mustDecodeNodeUnsafe(hash[:], node) @@ -287,14 +287,14 @@ type Config struct { // NewDatabase creates a new trie database to store ephemeral trie content before // its written out to disk or garbage collected. No read cache is created, so all // data retrievals will hit the underlying disk database. -func NewDatabase(diskdb ethdb.KeyValueStore) *Database { +func NewDatabase(diskdb ethdb.Database) *Database { return NewDatabaseWithConfig(diskdb, nil) } // NewDatabaseWithConfig creates a new trie database to store ephemeral trie content // before its written out to disk or garbage collected. It also acts as a read cache // for nodes loaded from disk. 
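Throughout the test diffs that follow, the switch of trie.Database from ethdb.KeyValueStore to the full ethdb.Database interface is satisfied with an in-memory database. A minimal sketch of the pattern (editor's illustration, not code from the patch):

import (
    "github.com/PlatONnetwork/PlatON-Go/core/rawdb"
    "github.com/PlatONnetwork/PlatON-Go/trie"
)

// newTestTrieDB builds a trie database over an in-memory ethdb.Database,
// replacing the old memorydb.New() key-value store.
func newTestTrieDB() *trie.Database {
    return trie.NewDatabase(rawdb.NewMemoryDatabase())
}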
-func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database { +func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database { var cleans *fastcache.Cache if config != nil && config.Cache > 0 { if config.Journal == "" { @@ -437,7 +437,7 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) { memcacheDirtyMissMeter.Mark(1) // Content unavailable in memory, attempt to retrieve from disk - enc := rawdb.ReadTrieNode(db.diskdb, hash) + enc := rawdb.ReadLegacyTrieNode(db.diskdb, hash) if len(enc) != 0 { if db.cleans != nil { db.cleans.Set(hash[:], enc) @@ -756,7 +756,7 @@ func (db *Database) Cap(limit common.StorageSize) error { for size > limit && oldest != (common.Hash{}) { // Fetch the oldest referenced node and push into the batch node := db.dirties[oldest] - rawdb.WriteTrieNode(batch, oldest, node.rlp()) + rawdb.WriteLegacyTrieNode(batch, oldest, node.rlp()) // If we exceeded the ideal batch size, commit and reset if batch.ValueSize() >= ethdb.IdealBatchSize { @@ -896,7 +896,7 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane return err } // If we've reached an optimal batch size, commit and start over - rawdb.WriteTrieNode(batch, hash, node.rlp()) + rawdb.WriteLegacyTrieNode(batch, hash, node.rlp()) if batch.ValueSize() >= ethdb.IdealBatchSize { if err := batch.Write(); err != nil { @@ -1036,3 +1036,8 @@ func (db *Database) CommitPreimages() error { } return db.preimages.commit(true) } + +// Scheme returns the node scheme used in the database. +func (db *Database) Scheme() string { + return rawdb.HashScheme +} diff --git a/trie/database_test.go b/trie/database_test.go index e24bfe4c9a..d41516e0fc 100644 --- a/trie/database_test.go +++ b/trie/database_test.go @@ -17,16 +17,16 @@ package trie import ( + "github.com/PlatONnetwork/PlatON-Go/core/rawdb" "testing" "github.com/PlatONnetwork/PlatON-Go/common" - "github.com/PlatONnetwork/PlatON-Go/ethdb/memorydb" ) // Tests that the trie database returns a missing trie node error if attempting // to retrieve the meta root. func TestDatabaseMetarootFetch(t *testing.T) { - db := NewDatabase(memorydb.New()) + db := NewDatabase(rawdb.NewMemoryDatabase()) if _, err := db.Node(common.Hash{}); err == nil { t.Fatalf("metaroot retrieval succeeded") } diff --git a/trie/iterator.go b/trie/iterator.go index dc3f727fa0..79059ae667 100644 --- a/trie/iterator.go +++ b/trie/iterator.go @@ -21,11 +21,16 @@ import ( "container/heap" "errors" - "github.com/PlatONnetwork/PlatON-Go/ethdb" - "github.com/PlatONnetwork/PlatON-Go/common" ) +// NodeResolver is used for looking up trie nodes before reaching into the real +// persistent layer. This is not mandatory, rather is an optimization for cases +// where trie nodes can be recovered from some external mechanism without reading +// from disk. In those cases, this resolver allows short circuiting accesses and +// returning them from memory. +type NodeResolver func(owner common.Hash, path []byte, hash common.Hash) []byte + // Iterator is a key-value trie iterator that traverses a Trie. type Iterator struct { nodeIt NodeIterator @@ -108,8 +113,8 @@ type NodeIterator interface { // to the value after calling Next. LeafProof() [][]byte - // AddResolver sets an intermediate database to use for looking up trie nodes - // before reaching into the real persistent layer. + // AddResolver sets a node resolver to use for looking up trie nodes before + // reaching into the real persistent layer. 
// // This is not required for normal operation, rather is an optimization for // cases where trie nodes can be recovered from some external mechanism without @@ -119,7 +124,7 @@ type NodeIterator interface { // Before adding a similar mechanism to any other place in Geth, consider // making trie.Database an interface and wrapping at that level. It's a huge // refactor, but it could be worth it if another occurrence arises. - AddResolver(ethdb.KeyValueReader) + AddResolver(NodeResolver) } // nodeIteratorState represents the iteration state at one particular node of the @@ -138,7 +143,7 @@ type nodeIterator struct { path []byte // Path to the current node err error // Failure set in case of an internal error in the iterator - resolver ethdb.KeyValueReader // Optional intermediate resolver above the disk layer + resolver NodeResolver // optional node resolver for avoiding disk hits } // errIteratorEnd is stored in nodeIterator.err when iteration is done. @@ -166,7 +171,7 @@ func newNodeIterator(trie *Trie, start []byte) NodeIterator { return it } -func (it *nodeIterator) AddResolver(resolver ethdb.KeyValueReader) { +func (it *nodeIterator) AddResolver(resolver NodeResolver) { it.resolver = resolver } @@ -371,7 +376,7 @@ func (it *nodeIterator) peekSeek(seekKey []byte) (*nodeIteratorState, *int, []by func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) { if it.resolver != nil { - if blob, err := it.resolver.Get(hash); err == nil && len(blob) > 0 { + if blob := it.resolver(it.trie.owner, path, common.BytesToHash(hash)); len(blob) > 0 { if resolved, err := decodeNode(hash, blob); err == nil { return resolved, nil } @@ -383,7 +388,7 @@ func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) { func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) { if it.resolver != nil { - if blob, err := it.resolver.Get(hash); err == nil && len(blob) > 0 { + if blob := it.resolver(it.trie.owner, path, common.BytesToHash(hash)); len(blob) > 0 { return blob, nil } } @@ -582,7 +587,7 @@ func (it *differenceIterator) NodeBlob() []byte { return it.b.NodeBlob() } -func (it *differenceIterator) AddResolver(resolver ethdb.KeyValueReader) { +func (it *differenceIterator) AddResolver(resolver NodeResolver) { panic("not implemented") } @@ -697,7 +702,7 @@ func (it *unionIterator) NodeBlob() []byte { return (*it.items)[0].NodeBlob() } -func (it *unionIterator) AddResolver(resolver ethdb.KeyValueReader) { +func (it *unionIterator) AddResolver(resolver NodeResolver) { panic("not implemented") } diff --git a/trie/iterator_test.go b/trie/iterator_test.go index 164f91d2e9..bbaec611d9 100644 --- a/trie/iterator_test.go +++ b/trie/iterator_test.go @@ -311,7 +311,7 @@ func TestIteratorContinueAfterErrorDisk(t *testing.T) { testIteratorContinueA func TestIteratorContinueAfterErrorMemonly(t *testing.T) { testIteratorContinueAfterError(t, true) } func testIteratorContinueAfterError(t *testing.T, memonly bool) { - diskdb := memorydb.New() + diskdb := rawdb.NewMemoryDatabase() triedb := NewDatabase(diskdb) tr := NewEmpty(triedb) @@ -320,7 +320,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) { } tr.Commit(nil) if !memonly { - triedb.Commit(tr.Hash(), true, true) + triedb.Commit(tr.Hash(), false, true) } wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil), nil) @@ -402,7 +402,7 @@ func TestIteratorContinueAfterSeekErrorMemonly(t *testing.T) { func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) { // Commit test trie to 
db, then remove the node containing "bars". - diskdb := memorydb.New() + diskdb := rawdb.NewMemoryDatabase() triedb := NewDatabase(diskdb) ctr := NewEmpty(triedb) @@ -411,7 +411,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) { } root, _, _ := ctr.Commit(nil) if !memonly { - triedb.Commit(root, true, true) + triedb.Commit(root, false, true) } barNodeHash := common.HexToHash("05041990364eb72fcb1127652ce40d8bab765f2bfe53225b1170d276cc101c2e") var ( @@ -514,7 +514,7 @@ func (l *loggingDb) Close() error { func makeLargeTestTrie() (*Database, *StateTrie, *loggingDb) { // Create an empty trie logDb := &loggingDb{0, memorydb.New()} - triedb := NewDatabase(logDb) + triedb := NewDatabase(rawdb.NewDatabase(logDb)) trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, triedb) // Fill it with some arbitrary data @@ -551,7 +551,7 @@ func TestNodeIteratorLargeTrie(t *testing.T) { func TestIteratorNodeBlob(t *testing.T) { var ( - db = memorydb.New() + db = rawdb.NewMemoryDatabase() triedb = NewDatabase(db) trie = NewEmpty(triedb) ) diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go index 29ab822dc4..d9bae57f4a 100644 --- a/trie/secure_trie_test.go +++ b/trie/secure_trie_test.go @@ -18,25 +18,24 @@ package trie import ( "bytes" + "github.com/PlatONnetwork/PlatON-Go/core/rawdb" "runtime" "sync" "testing" - "github.com/PlatONnetwork/PlatON-Go/ethdb/memorydb" - "github.com/PlatONnetwork/PlatON-Go/common" "github.com/PlatONnetwork/PlatON-Go/crypto" ) func newEmptySecure() *StateTrie { - trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, NewDatabase(memorydb.New())) + trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase())) return trie } // makeTestStateTrie creates a large enough secure trie for testing. func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) { // Create an empty trie - triedb := NewDatabase(memorydb.New()) + triedb := NewDatabase(rawdb.NewMemoryDatabase()) trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, triedb) // Fill it with some arbitrary data diff --git a/trie/stacktrie.go b/trie/stacktrie.go index ff2c3c10c8..6195e0a8e3 100644 --- a/trie/stacktrie.go +++ b/trie/stacktrie.go @@ -25,7 +25,6 @@ import ( "sync" "github.com/PlatONnetwork/PlatON-Go/common" - "github.com/PlatONnetwork/PlatON-Go/ethdb" "github.com/PlatONnetwork/PlatON-Go/log" ) @@ -37,10 +36,14 @@ var stPool = sync.Pool{ }, } -func stackTrieFromPool(db ethdb.KeyValueWriter, owner common.Hash) *StackTrie { +// NodeWriteFunc is used to provide all information of a dirty node for committing +// so that callers can flush nodes into database with desired scheme. +type NodeWriteFunc = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) + +func stackTrieFromPool(writeFn NodeWriteFunc, owner common.Hash) *StackTrie { st := stPool.Get().(*StackTrie) - st.db = db st.owner = owner + st.writeFn = writeFn return st } @@ -53,41 +56,41 @@ func returnToPool(st *StackTrie) { // in order. Once it determines that a subtree will no longer be inserted // into, it will hash it and free up the memory it uses. 
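The function-typed NodeResolver introduced above replaces the old ethdb.KeyValueReader hook on iterators. A hedged sketch of adapting a hash-keyed blob cache (the cache contents and the trie tr are assumed to exist in the caller):

// A simple resolver: ignore owner and path, serve blobs from an in-memory map.
// Returning nil makes the iterator fall back to the persistent layer.
cache := map[common.Hash][]byte{} // hash -> RLP-encoded node, filled by the caller
resolver := trie.NodeResolver(func(owner common.Hash, path []byte, hash common.Hash) []byte {
    return cache[hash]
})
it := tr.NodeIterator(nil)
it.AddResolver(resolver)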
type StackTrie struct { - owner common.Hash // the owner of the trie - nodeType uint8 // node type (as in branch, ext, leaf) - val []byte // value contained by this node if it's a leaf - key []byte // key chunk covered by this (leaf|ext) node - children [16]*StackTrie // list of children (for branch and exts) - db ethdb.KeyValueWriter // Pointer to the commit db, can be nil + owner common.Hash // the owner of the trie + nodeType uint8 // node type (as in branch, ext, leaf) + val []byte // value contained by this node if it's a leaf + key []byte // key chunk covered by this (leaf|ext) node + children [16]*StackTrie // list of children (for branch and exts) + writeFn NodeWriteFunc // function for committing nodes, can be nil } // NewStackTrie allocates and initializes an empty trie. -func NewStackTrie(db ethdb.KeyValueWriter) *StackTrie { +func NewStackTrie(writeFn NodeWriteFunc) *StackTrie { return &StackTrie{ nodeType: emptyNode, - db: db, + writeFn: writeFn, } } // NewStackTrieWithOwner allocates and initializes an empty trie, but with // the additional owner field. -func NewStackTrieWithOwner(db ethdb.KeyValueWriter, owner common.Hash) *StackTrie { +func NewStackTrieWithOwner(writeFn NodeWriteFunc, owner common.Hash) *StackTrie { return &StackTrie{ owner: owner, nodeType: emptyNode, - db: db, + writeFn: writeFn, } } // NewFromBinary initialises a serialized stacktrie with the given db. -func NewFromBinary(data []byte, db ethdb.KeyValueWriter) (*StackTrie, error) { +func NewFromBinary(data []byte, writeFn NodeWriteFunc) (*StackTrie, error) { var st StackTrie if err := st.UnmarshalBinary(data); err != nil { return nil, err } // If a database is used, we need to recursively add it to every child - if db != nil { - st.setDb(db) + if writeFn != nil { + st.setWriter(writeFn) } return &st, nil } @@ -100,7 +103,7 @@ func (st *StackTrie) MarshalBinary() (data []byte, err error) { ) if err := gob.NewEncoder(w).Encode(struct { Owner common.Hash - Nodetype uint8 + NodeType uint8 Val []byte Key []byte }{ @@ -136,13 +139,13 @@ func (st *StackTrie) UnmarshalBinary(data []byte) error { func (st *StackTrie) unmarshalBinary(r io.Reader) error { var dec struct { Owner common.Hash - Nodetype uint8 + NodeType uint8 Val []byte Key []byte } gob.NewDecoder(r).Decode(&dec) st.owner = dec.Owner - st.nodeType = dec.Nodetype + st.nodeType = dec.NodeType st.val = dec.Val st.key = dec.Key @@ -160,25 +163,25 @@ func (st *StackTrie) unmarshalBinary(r io.Reader) error { return nil } -func (st *StackTrie) setDb(db ethdb.KeyValueWriter) { - st.db = db +func (st *StackTrie) setWriter(writeFn NodeWriteFunc) { + st.writeFn = writeFn for _, child := range st.children { if child != nil { - child.setDb(db) + child.setWriter(writeFn) } } } -func newLeaf(owner common.Hash, key, val []byte, db ethdb.KeyValueWriter) *StackTrie { - st := stackTrieFromPool(db, owner) +func newLeaf(owner common.Hash, key, val []byte, writeFn NodeWriteFunc) *StackTrie { + st := stackTrieFromPool(writeFn, owner) st.nodeType = leafNode st.key = append(st.key, key...) st.val = val return st } -func newExt(owner common.Hash, key []byte, child *StackTrie, db ethdb.KeyValueWriter) *StackTrie { - st := stackTrieFromPool(db, owner) +func newExt(owner common.Hash, key []byte, child *StackTrie, writeFn NodeWriteFunc) *StackTrie { + st := stackTrieFromPool(writeFn, owner) st.nodeType = extNode st.key = append(st.key, key...) 
st.children[0] = child @@ -200,7 +203,7 @@ func (st *StackTrie) TryUpdate(key, value []byte) error { if len(value) == 0 { panic("deletion not supported") } - st.insert(k[:len(k)-1], value) + st.insert(k[:len(k)-1], value, nil) return nil } @@ -212,7 +215,7 @@ func (st *StackTrie) Update(key, value []byte) { func (st *StackTrie) Reset() { st.owner = common.Hash{} - st.db = nil + st.writeFn = nil st.key = st.key[:0] st.val = nil for i := range st.children { @@ -235,7 +238,7 @@ func (st *StackTrie) getDiffIndex(key []byte) int { // Helper function to that inserts a (key, value) pair into // the trie. -func (st *StackTrie) insert(key, value []byte) { +func (st *StackTrie) insert(key, value []byte, prefix []byte) { switch st.nodeType { case branchNode: /* Branch */ idx := int(key[0]) @@ -244,7 +247,7 @@ func (st *StackTrie) insert(key, value []byte) { for i := idx - 1; i >= 0; i-- { if st.children[i] != nil { if st.children[i].nodeType != hashedNode { - st.children[i].hash() + st.children[i].hash(append(prefix, byte(i))) } break } @@ -252,9 +255,9 @@ func (st *StackTrie) insert(key, value []byte) { // Add new child if st.children[idx] == nil { - st.children[idx] = newLeaf(st.owner, key[1:], value, st.db) + st.children[idx] = newLeaf(st.owner, key[1:], value, st.writeFn) } else { - st.children[idx].insert(key[1:], value) + st.children[idx].insert(key[1:], value, append(prefix, key[0])) } case extNode: /* Ext */ @@ -269,7 +272,7 @@ func (st *StackTrie) insert(key, value []byte) { if diffidx == len(st.key) { // Ext key and key segment are identical, recurse into // the child node. - st.children[0].insert(key[diffidx:], value) + st.children[0].insert(key[diffidx:], value, append(prefix, key[:diffidx]...)) return } // Save the original part. Depending if the break is @@ -278,14 +281,19 @@ func (st *StackTrie) insert(key, value []byte) { // node directly. var n *StackTrie if diffidx < len(st.key)-1 { - n = newExt(st.owner, st.key[diffidx+1:], st.children[0], st.db) + // Break on the non-last byte, insert an intermediate + // extension. The path prefix of the newly-inserted + // extension should also contain the different byte. + n = newExt(st.owner, st.key[diffidx+1:], st.children[0], st.writeFn) + n.hash(append(prefix, st.key[:diffidx+1]...)) } else { // Break on the last byte, no need to insert - // an extension node: reuse the current node + // an extension node: reuse the current node. + // The path prefix of the original part should + // still be same. n = st.children[0] + n.hash(append(prefix, st.key...)) } - // Convert to hash - n.hash() var p *StackTrie if diffidx == 0 { // the break is on the first byte, so @@ -298,12 +306,12 @@ func (st *StackTrie) insert(key, value []byte) { // the common prefix is at least one byte // long, insert a new intermediate branch // node. - st.children[0] = stackTrieFromPool(st.db, st.owner) + st.children[0] = stackTrieFromPool(st.writeFn, st.owner) st.children[0].nodeType = branchNode p = st.children[0] } // Create a leaf for the inserted part - o := newLeaf(st.owner, key[diffidx+1:], value, st.db) + o := newLeaf(st.owner, key[diffidx+1:], value, st.writeFn) // Insert both child leaves where they belong: origIdx := st.key[diffidx] @@ -339,7 +347,7 @@ func (st *StackTrie) insert(key, value []byte) { // Convert current node into an ext, // and insert a child branch node. 
st.nodeType = extNode - st.children[0] = NewStackTrieWithOwner(st.db, st.owner) + st.children[0] = NewStackTrieWithOwner(st.writeFn, st.owner) st.children[0].nodeType = branchNode p = st.children[0] } @@ -348,11 +356,11 @@ func (st *StackTrie) insert(key, value []byte) { // value and another containing the new value. The child leaf // is hashed directly in order to free up some memory. origIdx := st.key[diffidx] - p.children[origIdx] = newLeaf(st.owner, st.key[diffidx+1:], st.val, st.db) - p.children[origIdx].hash() + p.children[origIdx] = newLeaf(st.owner, st.key[diffidx+1:], st.val, st.writeFn) + p.children[origIdx].hash(append(prefix, st.key[:diffidx+1]...)) newIdx := key[diffidx] - p.children[newIdx] = newLeaf(st.owner, key[diffidx+1:], value, st.db) + p.children[newIdx] = newLeaf(st.owner, key[diffidx+1:], value, st.writeFn) // Finally, cut off the key part that has been passed // over to the children. @@ -383,14 +391,14 @@ func (st *StackTrie) insert(key, value []byte) { // - And the 'st.type' will be 'hashedNode' AGAIN // // This method also sets 'st.type' to hashedNode, and clears 'st.key'. -func (st *StackTrie) hash() { +func (st *StackTrie) hash(path []byte) { h := newHasher() defer returnHasherToPool(h) - st.hashRec(h) + st.hashRec(h, path) } -func (st *StackTrie) hashRec(hasher *hasher) { +func (st *StackTrie) hashRec(hasher *hasher, path []byte) { // The switch below sets this to the RLP-encoding of this node. var encodedNode []byte @@ -411,8 +419,7 @@ func (st *StackTrie) hashRec(hasher *hasher) { nodes[i] = nilValueNode continue } - - child.hashRec(hasher) + child.hashRec(hasher, append(path, byte(i))) if len(child.val) < 32 { nodes[i] = rawNode(child.val) } else { @@ -428,10 +435,9 @@ func (st *StackTrie) hashRec(hasher *hasher) { encodedNode = hasher.encodedBytes() case extNode: - st.children[0].hashRec(hasher) + st.children[0].hashRec(hasher, append(path, st.key...)) - sz := hexToCompactInPlace(st.key) - n := rawShortNode{Key: st.key[:sz]} + n := rawShortNode{Key: hexToCompact(st.key)} if len(st.children[0].val) < 32 { n.Val = rawNode(st.children[0].val) } else { @@ -447,8 +453,7 @@ func (st *StackTrie) hashRec(hasher *hasher) { case leafNode: st.key = append(st.key, byte(16)) - sz := hexToCompactInPlace(st.key) - n := rawShortNode{Key: st.key[:sz], Val: valueNode(st.val)} + n := rawShortNode{Key: hexToCompact(st.key), Val: valueNode(st.val)} n.encode(hasher.encbuf) encodedNode = hasher.encodedBytes() @@ -467,10 +472,8 @@ func (st *StackTrie) hashRec(hasher *hasher) { // Write the hash to the 'val'. We allocate a new val here to not mutate // input values st.val = hasher.hashData(encodedNode) - if st.db != nil { - // TODO! Is it safe to Put the slice here? - // Do all db implementations copy the value provided? - st.db.Put(st.val, encodedNode) + if st.writeFn != nil { + st.writeFn(st.owner, path, common.BytesToHash(st.val), encodedNode) } } @@ -479,12 +482,11 @@ func (st *StackTrie) Hash() (h common.Hash) { hasher := newHasher() defer returnHasherToPool(hasher) - st.hashRec(hasher) + st.hashRec(hasher, nil) if len(st.val) == 32 { copy(h[:], st.val) return h } - // If the node's RLP isn't 32 bytes long, the node will not // be hashed, and instead contain the rlp-encoding of the // node. For the top level node, we need to force the hashing. 
@@ -494,7 +496,7 @@ func (st *StackTrie) Hash() (h common.Hash) { return h } -// Commit will firstly hash the entrie trie if it's still not hashed +// Commit will firstly hash the entire trie if it's still not hashed // and then commit all nodes to the associated database. Actually most // of the trie nodes MAY have been committed already. The main purpose // here is to commit the root node. @@ -502,25 +504,24 @@ func (st *StackTrie) Hash() (h common.Hash) { // The associated database is expected, otherwise the whole commit // functionality should be disabled. func (st *StackTrie) Commit() (h common.Hash, err error) { - if st.db == nil { + if st.writeFn == nil { return common.Hash{}, ErrCommitDisabled } - hasher := newHasher() defer returnHasherToPool(hasher) - st.hashRec(hasher) + st.hashRec(hasher, nil) if len(st.val) == 32 { copy(h[:], st.val) return h, nil } - // If the node's RLP isn't 32 bytes long, the node will not - // be hashed (and committed), and instead contain the rlp-encoding of the + // be hashed (and committed), and instead contain the rlp-encoding of the // node. For the top level node, we need to force the hashing+commit. hasher.sha.Reset() hasher.sha.Write(st.val) hasher.sha.Read(h[:]) - st.db.Put(h[:], st.val) + + st.writeFn(st.owner, nil, h, st.val) return h, nil } diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go index 2ace5999b7..af2a6d15b4 100644 --- a/trie/stacktrie_test.go +++ b/trie/stacktrie_test.go @@ -18,12 +18,12 @@ package trie import ( "bytes" + "github.com/PlatONnetwork/PlatON-Go/core/rawdb" "math/big" "testing" "github.com/PlatONnetwork/PlatON-Go/common" "github.com/PlatONnetwork/PlatON-Go/crypto" - "github.com/PlatONnetwork/PlatON-Go/ethdb/memorydb" ) func TestStackTrieInsertAndHash(t *testing.T) { @@ -185,9 +185,10 @@ func TestStackTrieInsertAndHash(t *testing.T) { } } } + func TestSizeBug(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(memorydb.New())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") @@ -202,7 +203,7 @@ func TestSizeBug(t *testing.T) { func TestEmptyBug(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(memorydb.New())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") @@ -228,7 +229,7 @@ func TestEmptyBug(t *testing.T) { func TestValLength56(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(memorydb.New())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") @@ -253,8 +254,7 @@ func TestValLength56(t *testing.T) { // which causes a lot of node-within-node. This case was found via fuzzing. 
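With the db field replaced by a NodeWriteFunc, committing a StackTrie now goes through a caller-supplied callback. An editor's sketch that forwards every hashed node into the scheme-aware rawdb helper used later in this diff (batch, owner, scheme, key and value are assumed to be provided by the caller):

writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
    // Persist the node under (owner, path, hash) using the database's node scheme.
    rawdb.WriteTrieNode(batch, owner, path, hash, blob, scheme)
}
st := trie.NewStackTrieWithOwner(writeFn, owner)
st.Update(key, value)
root, err := st.Commit() // the root node is committed through writeFn as well
if err != nil {
    panic(err)
}
_ = root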
func TestUpdateSmallNodes(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(memorydb.New())) - + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) kvs := []struct { K string V string @@ -282,8 +282,7 @@ func TestUpdateSmallNodes(t *testing.T) { func TestUpdateVariableKeys(t *testing.T) { t.SkipNow() st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(memorydb.New())) - + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) kvs := []struct { K string V string @@ -352,7 +351,7 @@ func TestStacktrieNotModifyValues(t *testing.T) { func TestStacktrieSerialization(t *testing.T) { var ( st = NewStackTrie(nil) - nt = NewEmpty(NewDatabase(memorydb.New())) + nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) keyB = big.NewInt(1) keyDelta = big.NewInt(1) vals [][]byte diff --git a/trie/sync.go b/trie/sync.go index 4a3b8124b5..13645a7fff 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -65,7 +65,7 @@ type SyncPath [][]byte // version that can be sent over the network. func NewSyncPath(path []byte) SyncPath { // If the hash is from the account trie, append a single item, if it - // is from the a storage trie, append a tuple. Note, the length 64 is + // is from a storage trie, append a tuple. Note, the length 64 is // clashing between account leaf and storage root. It's fine though // because having a trie node at 64 depth means a hash collision was // found and we're long dead. @@ -75,6 +75,22 @@ func NewSyncPath(path []byte) SyncPath { return SyncPath{hexToKeybytes(path[:64]), hexToCompact(path[64:])} } +// LeafCallback is a callback type invoked when a trie operation reaches a leaf +// node. +// +// The keys is a path tuple identifying a particular trie node either in a single +// trie (account) or a layered trie (account -> storage). Each key in the tuple +// is in the raw format(32 bytes). +// +// The path is a composite hexary path identifying the trie node. All the key +// bytes are converted to the hexary nibbles and composited with the parent path +// if the trie node is in a layered trie. +// +// It's used by state sync and commit to allow handling external references +// between account and storage tries. And also it's used in the state healing +// for extracting the raw states(leaf nodes) with corresponding paths. +type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error + // nodeRequest represents a scheduled or already in-flight trie node retrieval request. type nodeRequest struct { hash common.Hash // Hash of the trie node to retrieve @@ -140,22 +156,24 @@ func (batch *syncMemBatch) hasCode(hash common.Hash) bool { // unknown trie hashes to retrieve, accepts node data associated with said hashes // and reconstructs the trie step by step until all is done. type Sync struct { + scheme string // Node scheme descriptor used in database. database ethdb.KeyValueReader // Persistent database to check for existing entries membatch *syncMemBatch // Memory buffer to avoid frequent database writes nodeReqs map[string]*nodeRequest // Pending requests pertaining to a trie node path codeReqs map[common.Hash]*codeRequest // Pending requests pertaining to a code hash - queue *prque.Prque // Priority queue with the pending requests + queue *prque.Prque[int64, any] // Priority queue with the pending requests fetches map[int]int // Number of active fetches per trie node depth } // NewSync creates a new trie data download scheduler. 
-func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback) *Sync { +func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, scheme string) *Sync { ts := &Sync{ + scheme: scheme, database: database, membatch: newSyncMemBatch(), nodeReqs: make(map[string]*nodeRequest), codeReqs: make(map[common.Hash]*codeRequest), - queue: prque.New(nil), + queue: prque.New[int64, any](nil), // Ugh, can contain both string and hash, whyyy fetches: make(map[int]int), } ts.AddSubTrie(root, nil, common.Hash{}, nil, callback) @@ -173,7 +191,8 @@ func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, par if s.membatch.hasNode(path) { return } - if rawdb.HasTrieNode(s.database, root) { + owner, inner := ResolvePath(path) + if rawdb.HasTrieNode(s.database, owner, inner, root, s.scheme) { return } // Assemble the new sub-trie sync request @@ -206,7 +225,7 @@ func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash, p return } // If database says duplicate, the blob is present for sure. - // Note we only check the existence with new code scheme, fast + // Note we only check the existence with new code scheme, snap // sync is expected to run with a fresh new node. Even there // exists the code with legacy format, fetch and store with // new scheme anyway. @@ -330,7 +349,8 @@ func (s *Sync) ProcessNode(result NodeSyncResult) error { func (s *Sync) Commit(dbw ethdb.Batch) error { // Dump the membatch into a database dbw for path, value := range s.membatch.nodes { - rawdb.WriteTrieNode(dbw, s.membatch.hashes[path], value) + owner, inner := ResolvePath([]byte(path)) + rawdb.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value, s.scheme) } for hash, value := range s.membatch.codes { rawdb.WriteCode(dbw, hash, value) @@ -451,8 +471,11 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) { // If database says duplicate, then at least the trie node is present // and we hold the assumption that it's NOT legacy contract code. - chash := common.BytesToHash(node) - if rawdb.HasTrieNode(s.database, chash) { + var ( + chash = common.BytesToHash(node) + owner, inner = ResolvePath(child.path) + ) + if rawdb.HasTrieNode(s.database, owner, inner, chash, s.scheme) { return } // Locally unknown node, schedule for retrieval @@ -526,3 +549,14 @@ func (s *Sync) commitCodeRequest(req *codeRequest) error { } return nil } + +// ResolvePath resolves the provided composite node path by separating the +// path in account trie if it's existent. +func ResolvePath(path []byte) (common.Hash, []byte) { + var owner common.Hash + if len(path) >= 2*common.HashLength { + owner = common.BytesToHash(hexToKeybytes(path[:2*common.HashLength])) + path = path[2*common.HashLength:] + } + return owner, path +} diff --git a/trie/sync_test.go b/trie/sync_test.go index b067642b85..621a0a23e0 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -18,6 +18,7 @@ package trie import ( "bytes" + "github.com/PlatONnetwork/PlatON-Go/core/rawdb" "testing" "github.com/PlatONnetwork/PlatON-Go/ethdb/memorydb" @@ -29,7 +30,7 @@ import ( // makeTestTrie create a sample test trie to test node-wise reconstruction. 
func makeTestTrie() (*Database, *StateTrie, map[string][]byte) { // Create an empty trie - triedb := NewDatabase(memorydb.New()) + triedb := NewDatabase(rawdb.NewMemoryDatabase()) trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, triedb) // Fill it with some arbitrary data @@ -97,13 +98,13 @@ type trieElement struct { // Tests that an empty trie is not scheduled for syncing. func TestEmptySync(t *testing.T) { - dbA := NewDatabase(memorydb.New()) - dbB := NewDatabase(memorydb.New()) + dbA := NewDatabase(rawdb.NewMemoryDatabase()) + dbB := NewDatabase(rawdb.NewMemoryDatabase()) emptyA := NewEmpty(dbA) emptyB, _ := New(common.Hash{}, emptyRoot, dbB) for i, trie := range []*Trie{emptyA, emptyB} { - sync := NewSync(trie.Hash(), memorydb.New(), nil) + sync := NewSync(trie.Hash(), memorydb.New(), nil, []*Database{dbA, dbB}[i].Scheme()) if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 { t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, paths, nodes, codes) } @@ -122,9 +123,9 @@ func testIterativeSync(t *testing.T, count int, bypath bool) { srcDb, srcTrie, srcData := makeTestTrie() // Create a destination trie and sync with the scheduler - diskdb := memorydb.New() + diskdb := rawdb.NewMemoryDatabase() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil) + sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme()) // The code requests are ignored here since there is no code // at the testing trie. @@ -188,9 +189,9 @@ func TestIterativeDelayedSync(t *testing.T) { srcDb, srcTrie, srcData := makeTestTrie() // Create a destination trie and sync with the scheduler - diskdb := memorydb.New() + diskdb := rawdb.NewMemoryDatabase() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil) + sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme()) // The code requests are ignored here since there is no code // at the testing trie. @@ -249,9 +250,9 @@ func testIterativeRandomSync(t *testing.T, count int) { srcDb, srcTrie, srcData := makeTestTrie() // Create a destination trie and sync with the scheduler - diskdb := memorydb.New() + diskdb := rawdb.NewMemoryDatabase() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil) + sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme()) // The code requests are ignored here since there is no code // at the testing trie. @@ -307,9 +308,9 @@ func TestIterativeRandomDelayedSync(t *testing.T) { srcDb, srcTrie, srcData := makeTestTrie() // Create a destination trie and sync with the scheduler - diskdb := memorydb.New() + diskdb := rawdb.NewMemoryDatabase() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil) + sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme()) // The code requests are ignored here since there is no code // at the testing trie. @@ -370,9 +371,9 @@ func TestDuplicateAvoidanceSync(t *testing.T) { srcDb, srcTrie, srcData := makeTestTrie() // Create a destination trie and sync with the scheduler - diskdb := memorydb.New() + diskdb := rawdb.NewMemoryDatabase() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil) + sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme()) // The code requests are ignored here since there is no code // at the testing trie. 
@@ -433,9 +434,9 @@ func TestIncompleteSync(t *testing.T) { srcDb, srcTrie, _ := makeTestTrie() // Create a destination trie and sync with the scheduler - diskdb := memorydb.New() + diskdb := rawdb.NewMemoryDatabase() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil) + sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme()) // The code requests are ignored here since there is no code // at the testing trie. @@ -513,9 +514,9 @@ func TestSyncOrdering(t *testing.T) { srcDb, srcTrie, srcData := makeTestTrie() // Create a destination trie and sync with the scheduler, tracking the requests - diskdb := memorydb.New() + diskdb := rawdb.NewMemoryDatabase() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil) + sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme()) // The code requests are ignored here since there is no code // at the testing trie. diff --git a/trie/trie.go b/trie/trie.go index 0615241f66..ce8ce0041e 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -36,22 +36,6 @@ var ( emptyState = crypto.Keccak256Hash(nil) ) -// LeafCallback is a callback type invoked when a trie operation reaches a leaf -// node. -// -// The keys is a path tuple identifying a particular trie node either in a single -// trie (account) or a layered trie (account -> storage). Each key in the tuple -// is in the raw format(32 bytes). -// -// The path is a composite hexary path identifying the trie node. All the key -// bytes are converted to the hexary nibbles and composited with the parent path -// if the trie node is in a layered trie. -// -// It's used by state sync and commit to allow handling external references -// between account and storage tries. And also it's used in the state healing -// for extracting the raw states(leaf nodes) with corresponding paths. -type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error - // Trie is a Merkle Patricia Trie. // The zero value is an empty trie with no database. // Use New to create a trie that sits on top of a database. 
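Pulling the sync-side changes together: NewSync now carries the node scheme, LeafCallback lives in trie/sync.go, and ResolvePath splits a composite path into the owning account and the storage-trie sub-path. A sketch of how a caller wires these up, mirroring the updated tests (illustrative only; the concrete root, diskdb and srcDb come from the caller):

import (
    "github.com/PlatONnetwork/PlatON-Go/common"
    "github.com/PlatONnetwork/PlatON-Go/ethdb"
    "github.com/PlatONnetwork/PlatON-Go/trie"
)

// newStateSync mirrors how the updated tests construct a syncer: the scheme of
// the source database is passed through so HasTrieNode and WriteTrieNode
// address nodes consistently.
func newStateSync(root common.Hash, diskdb ethdb.Database, srcDb *trie.Database) *trie.Sync {
    cb := func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error {
        // Per the LeafCallback comment, keys carries the account key alone or
        // the (account, storage) pair for leaves in a layered trie.
        owner, inner := trie.ResolvePath(path) // empty owner means the account trie itself
        _, _ = owner, inner
        return nil
    }
    return trie.NewSync(root, diskdb, cb, srcDb.Scheme())
}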
diff --git a/trie/trie_test.go b/trie/trie_test.go index 9c39a9b899..974b79e60f 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -35,8 +35,6 @@ import ( "github.com/PlatONnetwork/PlatON-Go/ethdb/leveldb" - "github.com/PlatONnetwork/PlatON-Go/ethdb/memorydb" - "github.com/stretchr/testify/assert" "github.com/davecgh/go-spew/spew" @@ -53,7 +51,7 @@ func init() { // Used for testing func newEmpty() *Trie { - trie := NewEmpty(NewDatabase(memorydb.New())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) return trie } @@ -77,7 +75,7 @@ func TestNull(t *testing.T) { } func TestMissingRoot(t *testing.T) { - trie, err := New(common.Hash{}, common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), NewDatabase(memorydb.New())) + trie, err := New(common.Hash{}, common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), NewDatabase(rawdb.NewMemoryDatabase())) if trie != nil { t.Error("New returned non-nil trie for invalid root") } @@ -90,7 +88,7 @@ func TestMissingNodeDisk(t *testing.T) { testMissingNode(t, false) } func TestMissingNodeMemonly(t *testing.T) { testMissingNode(t, true) } func testMissingNode(t *testing.T, memonly bool) { - diskdb := memorydb.New() + diskdb := rawdb.NewMemoryDatabase() triedb := NewDatabase(diskdb) trie := NewEmpty(triedb) @@ -412,7 +410,7 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value { } func runRandTest(rt randTest) bool { - triedb := NewDatabase(memorydb.New()) + triedb := NewDatabase(rawdb.NewMemoryDatabase()) tr := NewEmpty(triedb) values := make(map[string]string) // tracks content of the trie @@ -564,12 +562,7 @@ func BenchmarkHash(b *testing.B) { } func tempDB(tb testing.TB) *Database { - dir := tb.TempDir() - diskdb, err := leveldb.New(dir, 256, 0, "", false) - if err != nil { - panic(fmt.Sprintf("can't create temporary database: %v", err)) - } - return NewDatabase(diskdb) + return NewDatabase(rawdb.NewMemoryDatabase()) } func getString(trie *Trie, k string) []byte { @@ -598,7 +591,7 @@ func TestDecodeNode(t *testing.T) { } func TestDeepCopy(t *testing.T) { - memdb := memorydb.New() + memdb := rawdb.NewMemoryDatabase() triedb := NewDatabase(memdb) root := common.Hash{} tr, _ := NewSecure(common.Hash{}, root, triedb) @@ -732,7 +725,7 @@ func TestOneTrieCollision(t *testing.T) { return nil } - mem := memorydb.New() + mem := rawdb.NewMemoryDatabase() memdb := NewDatabase(mem) trie := NewEmpty(memdb) for _, d := range trieData1 { @@ -778,9 +771,9 @@ func TestTwoTrieCollision(t *testing.T) { {common.BytesToHash([]byte{2, 0}).Bytes()[:31], []byte{3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}}, } - mem1 := memorydb.New() + mem1 := rawdb.NewMemoryDatabase() memdb1 := NewDatabase(mem1) - mem2 := memorydb.New() + mem2 := rawdb.NewMemoryDatabase() memdb2 := NewDatabase(mem2) trie1 := NewEmpty(memdb1) trie2 := NewEmpty(memdb2) @@ -911,7 +904,7 @@ func TestTrieHashByDisorderedData(t *testing.T) { // Disrupted order for i := 0; i < 1; i++ { start = time.Now() - trie2 := NewEmpty(NewDatabase(memorydb.New())) + trie2 := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) triekvPairs2 := orderDisrupted(triekvPairs) for i := 0; i < len(triekvPairs2); i++ { err := trie2.TryUpdate(triekvPairs2[i].k, triekvPairs2[i].v) @@ -954,7 +947,7 @@ func TestTrieHashByUpdate(t *testing.T) { // Dag Trie for i := 0; i < 1; i++ { start = time.Now() - trie2 := NewEmpty(NewDatabase(memorydb.New())) + trie2 := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) for i := 0; i < len(triekvPairs); i++ { 
err := trie2.TryUpdate(triekvPairs[i].k, triekvPairs[i].v) if err != nil { diff --git a/x/plugin/reward_plugin.go b/x/plugin/reward_plugin.go index 153e5ca5c4..68237db62b 100644 --- a/x/plugin/reward_plugin.go +++ b/x/plugin/reward_plugin.go @@ -231,7 +231,7 @@ func (rmp *RewardMgrPlugin) ReturnDelegateReward(address common.Address, amount DelegateRewardPool := state.GetBalance(vm.DelegateRewardPoolAddr) if DelegateRewardPool.Cmp(amount) < 0 { - return fmt.Errorf("DelegateRewardPool balance is not enougth,want %v have %v", amount, DelegateRewardPool) + return fmt.Errorf("DelegateRewardPool balance is not enough,want %v have %v", amount, DelegateRewardPool) } state.SubBalance(vm.DelegateRewardPoolAddr, amount) diff --git a/x/plugin/staking_plugin.go b/x/plugin/staking_plugin.go index 6980096b2e..a6039281d7 100644 --- a/x/plugin/staking_plugin.go +++ b/x/plugin/staking_plugin.go @@ -1398,7 +1398,7 @@ func (sk *StakingPlugin) ElectNextVerifierList(blockHash common.Hash, blockNumbe return staking.ErrBlockNumberDisordered } - // caculate the new epoch start and end + // calculate the new epoch start and end newVerifierArr := &staking.ValidatorArray{ Start: oldIndex.End + 1, End: oldIndex.End + xutil.CalcBlocksEachEpoch(), @@ -1965,7 +1965,7 @@ func (sk *StakingPlugin) Election(blockHash common.Hash, header *types.Header, s panic("The Current Epoch VerifierList is empty, blockNumber: " + fmt.Sprint(blockNumber)) } - // caculate the next round start and end + // calculate the next round start and end start := curr.End + 1 end := curr.End + xutil.ConsensusSize()
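For the epoch bookkeeping touched in the last hunk, the arithmetic is simply "the next window starts one block after the previous one ends". A tiny sketch with hypothetical values, where epochBlocks stands for whatever xutil.CalcBlocksEachEpoch() returns for the active config:

// nextEpochRange returns the inclusive block range of the next verifier epoch.
// Example: if the old epoch ends at block 10*N and an epoch is N blocks long,
// the next epoch covers [10*N + 1, 11*N]; the next consensus round is derived
// the same way from ConsensusSize().
func nextEpochRange(oldEnd, epochBlocks uint64) (start, end uint64) {
    return oldEnd + 1, oldEnd + epochBlocks
}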