diff --git a/CHANGELOG.md b/CHANGELOG.md index 657ede80d0..620968ab27 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,16 @@ +# v0.9.5 - 2022-08-31 + +> This release introduces a warpsync plugin for fast epochs retrieval, a simplified faucet, local snapshot improvements, and network and general bug fixes. + +- WarpSync: simplify & fix send on closed channel (#2407) +- Fix network and warpsync bugs & reset genesis time (#2406) +- Mana vector fixes (#2396) +- Implement simplified faucet (#2391) +- Update to latest hive.go (#2400) +- Warpsync: epoch syncing (#2367) +- Activity committments and activity log based on epochs (#2345) +- Implement solid entry points (#2373) + # v0.9.4 - 2022-08-08 > This release mostly include maintenance changes to the deployment scripts and minor bug fixes. diff --git a/Makefile b/Makefile index 9a0734542d..cee82c15b2 100644 --- a/Makefile +++ b/Makefile @@ -20,11 +20,10 @@ proto: $(PROTO_GO_FILES) # If $GOPATH/bin/protoc-gen-go does not exist, we'll run this command to install it. $(PROTOC_GEN_GO): go install google.golang.org/protobuf/cmd/protoc-gen-go - go install google.golang.org/grpc/cmd/protoc-gen-go-grpc # Implicit compile rule for GRPC/proto files %.pb.go: %.proto | $(PROTOC_GEN_GO) - protoc $< --go_out=paths=source_relative:. --go-grpc_out=paths=source_relative:. + protoc $< --go_out=paths=source_relative:. 
.PHONY: clean_proto clean_proto: diff --git a/client/wallet/packages/address/address.go b/client/wallet/packages/address/address.go index e10127eaaf..513335a4ca 100644 --- a/client/wallet/packages/address/address.go +++ b/client/wallet/packages/address/address.go @@ -31,8 +31,8 @@ func (a Address) Base58() string { func (a Address) String() string { return stringify.Struct("Address", - stringify.StructField("Address", a.Address()), - stringify.StructField("Index", a.Index), + stringify.NewStructField("Address", a.Address()), + stringify.NewStructField("Index", a.Index), ) } diff --git a/client/wallet/packages/sendoptions/options.go b/client/wallet/packages/sendoptions/options.go index 84c672e88d..8dac243d57 100644 --- a/client/wallet/packages/sendoptions/options.go +++ b/client/wallet/packages/sendoptions/options.go @@ -1,6 +1,7 @@ package sendoptions import ( + "context" "time" "github.com/cockroachdb/errors" @@ -65,6 +66,25 @@ func Remainder(addr address.Address) SendFundsOption { } } +// Sources is an option for the SendFunds call that allows to specify the addresses from which the outputs for the +// transfer should be sourced. +func Sources(addr ...address.Address) SendFundsOption { + return func(options *SendFundsOptions) error { + options.SourceAddresses = addr + + return nil + } +} + +// Context is an option for SendFunds call that allows to specify a context that is used in case of waiting for +// transaction acceptance. +func Context(ctx context.Context) SendFundsOption { + return func(options *SendFundsOptions) error { + options.Context = ctx + return nil + } +} + // AccessManaPledgeID is an option for SendFunds call that defines the nodeID to pledge access mana to. 
func AccessManaPledgeID(nodeID string) SendFundsOption { return func(options *SendFundsOptions) error { @@ -143,6 +163,8 @@ type SendFundsOptions struct { ConsensusManaPledgeID string WaitForConfirmation bool UsePendingOutputs bool + SourceAddresses []address.Address + Context context.Context } // RequiredFunds derives how much funds are needed based on the Destinations to fund the transfer. diff --git a/client/wallet/wallet.go b/client/wallet/wallet.go index a080545b8b..bf369b2728 100644 --- a/client/wallet/wallet.go +++ b/client/wallet/wallet.go @@ -1,6 +1,7 @@ package wallet import ( + "context" "reflect" "time" "unsafe" @@ -115,7 +116,7 @@ func (wallet *Wallet) SendFunds(options ...sendoptions.SendFundsOption) (tx *dev // how much funds will we need to fund this transfer? requiredFunds := sendOptions.RequiredFunds() // collect that many outputs for funding - consumedOutputs, err := wallet.collectOutputsForFunding(requiredFunds, sendOptions.UsePendingOutputs) + consumedOutputs, err := wallet.collectOutputsForFunding(requiredFunds, sendOptions.UsePendingOutputs, sendOptions.SourceAddresses...) if err != nil { if errors.Is(err, ErrTooManyOutputs) { err = errors.Errorf("consolidate funds and try again: %w", err) @@ -169,7 +170,7 @@ func (wallet *Wallet) SendFunds(options ...sendoptions.SendFundsOption) (tx *dev return nil, err } if sendOptions.WaitForConfirmation { - err = wallet.WaitForTxAcceptance(tx.ID()) + err = wallet.WaitForTxAcceptance(tx.ID(), sendOptions.Context) } return tx, err @@ -1815,20 +1816,30 @@ func (wallet *Wallet) ExportState() []byte { // region WaitForTxAcceptance ////////////////////////////////////////////////////////////////////////////////////////// // WaitForTxAcceptance waits for the given tx to be accepted. 
-func (wallet *Wallet) WaitForTxAcceptance(txID utxo.TransactionID) (err error) { +func (wallet *Wallet) WaitForTxAcceptance(txID utxo.TransactionID, optionalCtx ...context.Context) (err error) { + ctx := context.Background() + if len(optionalCtx) == 1 && optionalCtx[0] != nil { + ctx = optionalCtx[0] + } + + ticker := time.NewTicker(wallet.ConfirmationPollInterval) timeoutCounter := time.Duration(0) for { - time.Sleep(wallet.ConfirmationPollInterval) - timeoutCounter += wallet.ConfirmationPollInterval - confirmationState, fetchErr := wallet.connector.GetTransactionConfirmationState(txID) - if fetchErr != nil { - return fetchErr - } - if confirmationState.IsAccepted() { - return - } - if timeoutCounter > wallet.ConfirmationTimeout { - return errors.Errorf("transaction %s did not confirm within %d seconds", txID.Base58(), wallet.ConfirmationTimeout/time.Second) + select { + case <-ctx.Done(): + return errors.Errorf("context cancelled") + case <-ticker.C: + timeoutCounter += wallet.ConfirmationPollInterval + confirmationState, fetchErr := wallet.connector.GetTransactionConfirmationState(txID) + if fetchErr != nil { + return fetchErr + } + if confirmationState.IsAccepted() { + return + } + if timeoutCounter > wallet.ConfirmationTimeout { + return errors.Errorf("transaction %s did not confirm within %d seconds", txID.Base58(), wallet.ConfirmationTimeout/time.Second) + } } } } @@ -1968,13 +1979,15 @@ func (wallet *Wallet) findStateControlledAliasOutputByAliasID(id *devnetvm.Alias // collectOutputsForFunding tries to collect unspent outputs to fund fundingBalance. // It may collect pending outputs according to flag. 
-func (wallet *Wallet) collectOutputsForFunding(fundingBalance map[devnetvm.Color]uint64, includePending bool) (OutputsByAddressAndOutputID, error) { +func (wallet *Wallet) collectOutputsForFunding(fundingBalance map[devnetvm.Color]uint64, includePending bool, addresses ...address.Address) (OutputsByAddressAndOutputID, error) { if fundingBalance == nil { return nil, errors.Errorf("can't collect fund: empty fundingBalance provided") } _ = wallet.outputManager.Refresh() - addresses := wallet.addressManager.Addresses() + if len(addresses) == 0 { + addresses = wallet.addressManager.Addresses() + } unspentOutputs := wallet.outputManager.UnspentValueOutputs(includePending, addresses...) collected := make(map[devnetvm.Color]uint64) diff --git a/deploy/ansible/roles/goshimmer-node/templates/docker-compose-entrynode.yml.j2 b/deploy/ansible/roles/goshimmer-node/templates/docker-compose-entrynode.yml.j2 index c6bf57882e..fba9bfe1fa 100644 --- a/deploy/ansible/roles/goshimmer-node/templates/docker-compose-entrynode.yml.j2 +++ b/deploy/ansible/roles/goshimmer-node/templates/docker-compose-entrynode.yml.j2 @@ -26,6 +26,6 @@ services: {% endif %} --autoPeering.entryNodes= --analysis.client.serverAddress= - --node.disablePlugins=activity,analysisClient,chat,consensus,dashboard,faucet,gossip,firewall,issuer,mana,manualpeering,blockLayer,metrics,networkdelay,portcheck,pow,syncBeaconFollower,webAPIBroadcastDataEndpoint,WebAPIDataEndpoint,WebAPIHealthzEndpoint,WebAPIFaucetRequestEndpoint,webAPIFindTransactionHashesEndpoint,webAPIGetNeighborsEndpoint,webAPIGetTransactionObjectsByHashEndpoint,webAPIGetTransactionTrytesByHashEndpoint,WebAPIInfoEndpoint,WebAPILedgerstateEndpoint,WebAPIBlockEndpoint,WebAPIToolsBlockEndpoint,WebAPIWeightProviderEndpoint,remotelog,remotelogmetrics,DAGsVisualizer,WebAPIRateSetterEndpoint,WebAPISchedulerEndpoint,ManaInitializer,Notarization,EpochStorage,WebAPIEpochEndpoint,BootstrapManager + 
--node.disablePlugins=activity,analysisClient,chat,consensus,dashboard,faucet,gossip,firewall,issuer,mana,manualpeering,blockLayer,metrics,networkdelay,portcheck,pow,syncBeaconFollower,webAPIBroadcastDataEndpoint,WebAPIDataEndpoint,WebAPIHealthzEndpoint,WebAPIFaucetRequestEndpoint,webAPIFindTransactionHashesEndpoint,webAPIGetNeighborsEndpoint,webAPIGetTransactionObjectsByHashEndpoint,webAPIGetTransactionTrytesByHashEndpoint,WebAPIInfoEndpoint,WebAPILedgerstateEndpoint,WebAPIBlockEndpoint,WebAPIToolsBlockEndpoint,WebAPIWeightProviderEndpoint,remotelog,remotelogmetrics,DAGsVisualizer,WebAPIRateSetterEndpoint,WebAPISchedulerEndpoint,ManaInitializer,Notarization,EpochStorage,WebAPIEpochEndpoint,BootstrapManager,Warpsync,Snapshot --logger.level={{ logLevel }} --logger.outputPaths=stdout diff --git a/go.mod b/go.mod index 3e87529f5a..6a8a02ead8 100644 --- a/go.mod +++ b/go.mod @@ -12,13 +12,12 @@ require ( github.com/gin-gonic/gin v1.7.7 github.com/go-resty/resty/v2 v2.6.0 github.com/gorilla/websocket v1.5.0 - github.com/iotaledger/hive.go/core v0.0.0-20220804174551-efbca20a83e4 - github.com/iotaledger/hive.go/serializer/v2 v2.0.0-20220804174551-efbca20a83e4 + github.com/iotaledger/hive.go/core v1.0.0-beta.3.0.20220825155653-0a69188181ca + github.com/iotaledger/hive.go/serializer/v2 v2.0.0-beta.2.0.20220825155653-0a69188181ca github.com/labstack/echo v3.3.10+incompatible github.com/labstack/gommon v0.3.0 github.com/libp2p/go-libp2p v0.15.0 github.com/libp2p/go-libp2p-core v0.9.0 - github.com/libp2p/go-yamux/v2 v2.2.0 github.com/magiconair/properties v1.8.6 github.com/markbates/pkger v0.17.1 github.com/mr-tron/base58 v1.2.0 @@ -26,6 +25,7 @@ require ( github.com/multiformats/go-varint v0.0.6 github.com/panjf2000/ants/v2 v2.5.0 github.com/paulbellamy/ratecounter v0.2.0 + github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.11.1 github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible github.com/spf13/pflag v1.0.5 @@ -52,6 +52,7 @@ 
require ( github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/emirpasic/gods v1.18.1 // indirect + github.com/ethereum/go-ethereum v1.10.21 // indirect github.com/fatih/structs v1.1.0 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect @@ -64,13 +65,14 @@ require ( github.com/go-stack/stack v1.8.0 // indirect github.com/gobuffalo/here v0.6.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/gohornet/grocksdb v1.7.1-0.20220426081058-60f50d7c59e8 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/huin/goupnp v1.0.2 // indirect + github.com/huin/goupnp v1.0.3 // indirect + github.com/iancoleman/orderedmap v0.2.0 // indirect + github.com/iotaledger/grocksdb v1.7.5-0.20220808142449-1dc0b8ac4d7d // indirect github.com/ipfs/go-cid v0.0.7 // indirect github.com/ipfs/go-ipfs-util v0.0.2 // indirect github.com/ipfs/go-log v1.0.5 // indirect @@ -123,6 +125,7 @@ require ( github.com/libp2p/go-stream-muxer-multistream v0.3.0 // indirect github.com/libp2p/go-tcp-transport v0.2.8 // indirect github.com/libp2p/go-ws-transport v0.5.0 // indirect + github.com/libp2p/go-yamux/v2 v2.2.0 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect @@ -151,7 +154,6 @@ require ( github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.2 // indirect github.com/petermattis/goid v0.0.0-20220712135657-ac599d9cba15 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect 
github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.30.0 // indirect @@ -180,10 +182,10 @@ require ( go.dedis.ch/fixbuf v1.0.3 // indirect go.mongodb.org/mongo-driver v1.5.1 // indirect go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b // indirect + go.uber.org/zap v1.22.0 // indirect + golang.org/x/net v0.0.0-20220809012201-f428fae20770 // indirect golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect - golang.org/x/sys v0.0.0-20220803195053-6e608f9ce704 // indirect + golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect diff --git a/go.sum b/go.sum index e31d939e59..7e2cf3af92 100644 --- a/go.sum +++ b/go.sum @@ -236,6 +236,7 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go. 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/ethereum/go-ethereum v1.10.21 h1:5lqsEx92ZaZzRyOqBEXux4/UR06m296RGzN3ol3teJY= +github.com/ethereum/go-ethereum v1.10.21/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= @@ -342,8 +343,6 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= -github.com/gohornet/grocksdb v1.7.1-0.20220426081058-60f50d7c59e8 h1:JBcaA1xdFtalpZsMTYZuwUSOZxPAqqYAZa1gKYpK9nw= -github.com/gohornet/grocksdb v1.7.1-0.20220426081058-60f50d7c59e8/go.mod h1:RlgTltBHJ3ha/p0pWAd1g2zjw/524L1Vw6pjBTYLdIA= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -486,20 +485,25 @@ github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4Dvx github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.0.2 
h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI= github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= +github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= +github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= +github.com/iancoleman/orderedmap v0.2.0 h1:sq1N/TFpYH++aViPcaKjys3bDClUEU7s5B+z6jq8pNA= +github.com/iancoleman/orderedmap v0.2.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/iotaledger/hive.go/core v0.0.0-20220804174551-efbca20a83e4 h1:LyZZsG5V5esS3iN6f42AVyUTxodo8s8KkkHYQi+CC8o= -github.com/iotaledger/hive.go/core v0.0.0-20220804174551-efbca20a83e4/go.mod h1:iiNz8/E6xPs6TWPiBKLB7QZEbaaSSNylnfWKThZNCEc= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-20220804174551-efbca20a83e4 h1:B2N7jbiIKEkLPPA5/kyDXqO6T+cPr5xhhDKS3Tjph9I= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-20220804174551-efbca20a83e4/go.mod h1:R6Q0aeFvUzt7+Mjd7fEXeCHOHasMLmXPxUGhCybTSw4= +github.com/iotaledger/grocksdb v1.7.5-0.20220808142449-1dc0b8ac4d7d 
h1:KYc/EkMX3CXvsYyUC9EvToUeYc0c74ZwjRg/0Wd27LU= +github.com/iotaledger/grocksdb v1.7.5-0.20220808142449-1dc0b8ac4d7d/go.mod h1:DuNKJ1G/vKugT7WGAoftMTu2aApNNxF4ADFMxLmKS2Y= +github.com/iotaledger/hive.go/core v1.0.0-beta.3.0.20220825155653-0a69188181ca h1:IyCodMz8Hz51t9Hh6/dQIEoJH8Un5neyeTaNDmOmwpE= +github.com/iotaledger/hive.go/core v1.0.0-beta.3.0.20220825155653-0a69188181ca/go.mod h1:aBKzVl6kjSz2bHsNoxAmTJUpHf/sr1x0PdOJbteEcsQ= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-beta.2.0.20220825155653-0a69188181ca h1:ZCJLYXxqi9hUo89BnJ7UVXLqruLZjdXX9tEY0J0aXYE= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-beta.2.0.20220825155653-0a69188181ca/go.mod h1:beZKjVT4HPayWfwsmItNNI5E81rS783vGx5ZwRbZQgY= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -1346,7 +1350,6 @@ go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -1361,8 +1364,8 @@ go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap 
v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= -go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= +go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1498,8 +1501,8 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b h1:3ogNYyK4oIQdIKzTu68hQrr4iuVxF3AxKl9Aj/eDrw0= -golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220809012201-f428fae20770 h1:dIi4qVdvjZEjiMDv7vhokAZNGnz3kepwuXqFKYDdDMs= +golang.org/x/net v0.0.0-20220809012201-f428fae20770/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1620,8 +1623,8 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220803195053-6e608f9ce704 h1:Y7NOhdqIOU8kYI7BxsgL38d0ot0raxvcW+EMQU2QrT4= -golang.org/x/sys v0.0.0-20220803195053-6e608f9ce704/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= @@ -1714,8 +1717,7 @@ golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 h1:0c3L82FDQ5rt1bjTBlchS8t6RQ6299/+5bWMnRLh+uI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/packages/app/chat/events.go b/packages/app/chat/events.go index 7015ecbf35..3230ded2a3 
100644 --- a/packages/app/chat/events.go +++ b/packages/app/chat/events.go @@ -18,7 +18,7 @@ func newEvents() (new *Events) { } } -// Event defines the information passed when a chat event fires. +// BlockReceivedEvent defines the information passed when a chat event fires. type BlockReceivedEvent struct { From string To string diff --git a/packages/app/faucet/request_test.go b/packages/app/faucet/request_test.go index 0f63907064..96f23f747e 100644 --- a/packages/app/faucet/request_test.go +++ b/packages/app/faucet/request_test.go @@ -19,8 +19,8 @@ import ( func TestRequest(t *testing.T) { keyPair := ed25519.GenerateKeyPair() address := devnetvm.NewED25519Address(keyPair.PublicKey) - access, _ := identity.RandomID() - consensus, _ := identity.RandomID() + access, _ := identity.RandomIDInsecure() + consensus, _ := identity.RandomIDInsecure() originalRequest := NewRequest(address, access, consensus, 0) diff --git a/packages/core/consensus/acceptance/gadget.go b/packages/core/consensus/acceptance/gadget.go index 20e1b7d4af..2ad019a4ea 100644 --- a/packages/core/consensus/acceptance/gadget.go +++ b/packages/core/consensus/acceptance/gadget.go @@ -26,7 +26,7 @@ const ( var ( // DefaultConflictTranslation is the default function to translate the approval weight to confirmation.State of a conflict. 
- DefaultConflictTranslation ConflictThresholdTranslation = func(conflictID utxo.TransactionID, aw float64) confirmation.State { + DefaultConflictTranslation ConflictThresholdTranslation = func(_ utxo.TransactionID, aw float64) confirmation.State { if aw >= acceptanceThreshold { return confirmation.Accepted } diff --git a/packages/core/epoch/types.go b/packages/core/epoch/types.go index a76b78f705..48ad759fba 100644 --- a/packages/core/epoch/types.go +++ b/packages/core/epoch/types.go @@ -3,23 +3,34 @@ package epoch import ( "context" "fmt" + "github.com/cockroachdb/errors" + "github.com/iotaledger/hive.go/core/generics/set" + "github.com/iotaledger/hive.go/core/generics/shrinkingmap" + "github.com/iotaledger/hive.go/core/identity" + "strings" "time" + "github.com/iotaledger/hive.go/core/byteutils" "github.com/iotaledger/hive.go/core/generics/model" "github.com/iotaledger/hive.go/core/serix" "github.com/mr-tron/base58" "golang.org/x/crypto/blake2b" - - "github.com/iotaledger/goshimmer/packages/node/clock" ) var ( // GenesisTime is the time (Unix in seconds) of the genesis. - GenesisTime int64 = 1656588336 + GenesisTime int64 = 1661859573 // Duration is the default epoch duration in seconds. Duration int64 = 10 ) +func init() { + err := serix.DefaultAPI.RegisterTypeSettings(nodesActivitySerializableMap{}, serix.TypeSettings{}.WithLengthPrefixType(serix.LengthPrefixTypeAsUint32)) + if err != nil { + panic(fmt.Errorf("error registering NodesActivityLog type settings: %w", err)) + } +} + // Index is the ID of an epoch. type Index int64 @@ -67,11 +78,6 @@ func (i Index) EndTime() time.Time { return time.Unix(endUnix, 0) } -// CurrentEpochIndex returns the EI at the current RATT time. 
-func CurrentEpochIndex() Index { - return IndexFromTime(clock.SyncedTime()) -} - // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// type MerkleRoot [blake2b.Size256]byte @@ -81,7 +87,7 @@ type ( EC = MerkleRoot ) -func NewMerkleRoot(bytes []byte) (mr MerkleRoot) { +func NewMerkleRoot(bytes []byte) MerkleRoot { b := [blake2b.Size256]byte{} copy(b[:], bytes[:]) return b @@ -95,15 +101,24 @@ func (m MerkleRoot) Bytes() []byte { return m[:] } +// CommitmentRoots contains roots of trees of an epoch. +type CommitmentRoots struct { + TangleRoot MerkleRoot `serix:"0"` + StateMutationRoot MerkleRoot `serix:"1"` + StateRoot MerkleRoot `serix:"2"` + ManaRoot MerkleRoot `serix:"3"` +} + // ECRecord is a storable object represents the ecRecord of an epoch. type ECRecord struct { model.Storable[Index, ECRecord, *ECRecord, ecRecord] `serix:"0"` } type ecRecord struct { - EI Index `serix:"0"` - ECR ECR `serix:"1"` - PrevEC EC `serix:"2"` + EI Index `serix:"0"` + ECR ECR `serix:"1"` + PrevEC EC `serix:"2"` + Roots *CommitmentRoots `serix:"3"` } // NewECRecord creates and returns a ECRecord of the given EI. @@ -112,6 +127,7 @@ func NewECRecord(ei Index) (new *ECRecord) { EI: ei, ECR: MerkleRoot{}, PrevEC: MerkleRoot{}, + Roots: &CommitmentRoots{}, }) new.SetID(ei) return @@ -179,3 +195,216 @@ func (e *ECRecord) FromBytes(bytes []byte) (err error) { return } + +// Roots returns the CommitmentRoots of an ECRecord. +func (e *ECRecord) Roots() *CommitmentRoots { + e.RLock() + defer e.RUnlock() + + return e.M.Roots +} + +// SetRoots sets the CommitmentRoots of an ECRecord. +func (e *ECRecord) SetRoots(roots *CommitmentRoots) { + e.Lock() + defer e.Unlock() + + e.M.Roots = roots + e.SetModified() +} + +// ComputeEC calculates the epoch commitment hash from the given ECRecord. 
+func (e *ECRecord) ComputeEC() (ec EC) { + ecHash := blake2b.Sum256(byteutils.ConcatBytes(e.EI().Bytes(), e.ECR().Bytes(), e.PrevEC().Bytes())) + + return NewMerkleRoot(ecHash[:]) +} + +// region hashing functions //////////////////////////////////////////////////////////////////////////////////////////// + +// ComputeECR calculates an ECR from the tree roots. +func ComputeECR(tangleRoot, stateMutationRoot, stateRoot, manaRoot MerkleRoot) ECR { + branch1Hashed := blake2b.Sum256(byteutils.ConcatBytes(tangleRoot.Bytes(), stateMutationRoot.Bytes())) + branch2Hashed := blake2b.Sum256(byteutils.ConcatBytes(stateRoot.Bytes(), manaRoot.Bytes())) + rootHashed := blake2b.Sum256(byteutils.ConcatBytes(branch1Hashed[:], branch2Hashed[:])) + + return NewMerkleRoot(rootHashed[:]) +} + +// endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// region NodesActivityLog ////////////////////////////////////////////////////////////////////////////////////////////////// + +type nodesActivitySerializableMap map[Index]*ActivityLog + +func (al *nodesActivitySerializableMap) FromBytes(data []byte) (err error) { + _, err = serix.DefaultAPI.Decode(context.Background(), data, al, serix.WithValidation()) + if err != nil { + err = errors.Errorf("failed to parse activeNodes: %w", err) + return + } + return +} + +func (al *nodesActivitySerializableMap) Bytes() []byte { + objBytes, err := serix.DefaultAPI.Encode(context.Background(), *al, serix.WithValidation()) + if err != nil { + panic(err) + } + return objBytes +} + +func (al *nodesActivitySerializableMap) nodesActivityLog() *NodesActivityLog { + activity := NewNodesActivityLog() + for ei, a := range *al { + activity.Set(ei, a) + } + return activity +} + +type NodesActivityLog struct { + shrinkingmap.ShrinkingMap[Index, *ActivityLog] `serix:"0,lengthPrefixType=uint32"` +} + +func (al *NodesActivityLog) FromBytes(data []byte) (err error) { + m := 
make(nodesActivitySerializableMap) + err = m.FromBytes(data) + if err != nil { + return err + } + al.loadActivityLogsMap(m) + return +} + +func (al *NodesActivityLog) Bytes() []byte { + m := al.activityLogsMap() + return m.Bytes() +} + +func NewNodesActivityLog() *NodesActivityLog { + return &NodesActivityLog{*shrinkingmap.New[Index, *ActivityLog]()} +} + +func (al *NodesActivityLog) activityLogsMap() *nodesActivitySerializableMap { + activityMap := make(nodesActivitySerializableMap) + al.ForEach(func(ei Index, activity *ActivityLog) bool { + activityMap[ei] = activity + return true + }) + return &activityMap +} + +func (al *NodesActivityLog) loadActivityLogsMap(m nodesActivitySerializableMap) { + for ei, a := range m { + al.Set(ei, a) + } + return +} + +// endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// region ActivityLog ////////////////////////////////////////////////////////////////////////////////////////////////// + +// ActivityLog is a time-based log of node activity. It stores information when a node is active and provides +// functionality to query for certain timeframes. +type ActivityLog struct { + model.Mutable[ActivityLog, *ActivityLog, activityLogModel] `serix:"0"` +} + +// nodeActivityModel stores node identities and corresponding accepted block counters indicating how many blocks node issued in a given epoch. +type activityLogModel struct { + ActivityLog *set.AdvancedSet[identity.ID] `serix:"0,lengthPrefixType=uint32"` +} + +// NewActivityLog is the constructor for ActivityLog. +func NewActivityLog() *ActivityLog { + return model.NewMutable[ActivityLog](&activityLogModel{ActivityLog: set.NewAdvancedSet[identity.ID]()}) +} + +// Add adds a node to the activity log. +func (a *ActivityLog) Add(nodeID identity.ID) (added bool) { + return a.InnerModel().ActivityLog.Add(nodeID) +} + +// Remove removes a node from the activity log. 
+func (a *ActivityLog) Remove(nodeID identity.ID) (removed bool) { + return a.InnerModel().ActivityLog.Delete(nodeID) +} + +// Active returns true if the provided node was active. +func (a *ActivityLog) Active(nodeID identity.ID) (active bool) { + if a.InnerModel().ActivityLog.Has(nodeID) { + return true + } + + return +} + +// String returns a human-readable version of ActivityLog. +func (a *ActivityLog) String() string { + var builder strings.Builder + builder.WriteString(fmt.Sprintf("ActivityLog(len=%d, elements=", a.Size())) + a.InnerModel().ActivityLog.ForEach(func(nodeID identity.ID) (err error) { + builder.WriteString(fmt.Sprintf("%s, ", nodeID.String())) + return + }) + builder.WriteString(")") + return builder.String() +} + +// Clone clones the ActivityLog. +func (a *ActivityLog) Clone() *ActivityLog { + clone := NewActivityLog() + clone.InnerModel().ActivityLog = a.InnerModel().ActivityLog.Clone() + return clone +} + +// ForEach iterates through the activity set and calls the callback for every element. +func (a *ActivityLog) ForEach(callback func(nodeID identity.ID) (err error)) (err error) { + return a.InnerModel().ActivityLog.ForEach(callback) +} + +// Size returns the size of the activity log. +func (a *ActivityLog) Size() int { + return a.InnerModel().ActivityLog.Size() +} + +// endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// SnapshotEpochActivity is the data structure to store node activity for the snapshot. +type SnapshotEpochActivity map[Index]*SnapshotNodeActivity + +// NewSnapshotEpochActivity creates a new SnapshotEpochActivity instance. +func NewSnapshotEpochActivity() SnapshotEpochActivity { + return make(SnapshotEpochActivity) +} + +// SnapshotNodeActivity is structure to store nodes activity for an epoch. 
+type SnapshotNodeActivity struct { + model.Mutable[SnapshotNodeActivity, *SnapshotNodeActivity, nodeActivityModel] `serix:"0"` +} + +// NewSnapshotNodeActivity creates a new SnapshotNodeActivity instance. +func NewSnapshotNodeActivity() *SnapshotNodeActivity { + return model.NewMutable[SnapshotNodeActivity](&nodeActivityModel{NodesLog: make(map[identity.ID]uint64)}) +} + +// nodeActivityModel stores node identities and corresponding accepted block counters indicating how many blocks node issued in a given epoch. +type nodeActivityModel struct { + NodesLog map[identity.ID]uint64 `serix:"0,lengthPrefixType=uint32"` +} + +// NodesLog returns its activity map of nodes. +func (s *SnapshotNodeActivity) NodesLog() map[identity.ID]uint64 { + return s.M.NodesLog +} + +// NodeActivity returns activity counter for a given node. +func (s *SnapshotNodeActivity) NodeActivity(nodeID identity.ID) uint64 { + return s.M.NodesLog[nodeID] +} + +// SetNodeActivity adds a node activity record to the activity log. +func (s *SnapshotNodeActivity) SetNodeActivity(nodeID identity.ID, activity uint64) { + s.M.NodesLog[nodeID] = activity +} diff --git a/packages/core/ledger/ledger.go b/packages/core/ledger/ledger.go index c7b15ded27..5babede66d 100644 --- a/packages/core/ledger/ledger.go +++ b/packages/core/ledger/ledger.go @@ -10,7 +10,6 @@ import ( "github.com/iotaledger/hive.go/core/types/confirmation" "github.com/iotaledger/goshimmer/packages/core/conflictdag" - "github.com/iotaledger/goshimmer/packages/core/epoch" "github.com/iotaledger/goshimmer/packages/core/ledger/utxo" ) @@ -99,27 +98,20 @@ func (l *Ledger) LoadOutputWithMetadatas(outputsWithMetadatas []*OutputWithMetad } // LoadEpochDiffs loads EpochDiffs from a snapshot file to the storage. 
-func (l *Ledger) LoadEpochDiffs(header *SnapshotHeader, epochDiffs map[epoch.Index]*EpochDiff) error { - for ei := header.FullEpochIndex + 1; ei <= header.DiffEpochIndex; ei++ { - epochdiff, exists := epochDiffs[ei] - if !exists { - panic("epoch diff not found for epoch") - } - - for _, spent := range epochdiff.Spent() { - l.Storage.outputStorage.Delete(spent.ID().Bytes()) - l.Storage.outputMetadataStorage.Delete(spent.ID().Bytes()) - } +func (l *Ledger) LoadEpochDiff(epochDiff *EpochDiff) error { + for _, spent := range epochDiff.Spent() { + l.Storage.outputStorage.Delete(spent.ID().Bytes()) + l.Storage.outputMetadataStorage.Delete(spent.ID().Bytes()) + } - for _, created := range epochdiff.Created() { - outputMetadata := NewOutputMetadata(created.ID()) - outputMetadata.SetAccessManaPledgeID(created.AccessManaPledgeID()) - outputMetadata.SetConsensusManaPledgeID(created.ConsensusManaPledgeID()) - outputMetadata.SetConfirmationState(confirmation.Confirmed) + for _, created := range epochDiff.Created() { + outputMetadata := NewOutputMetadata(created.ID()) + outputMetadata.SetAccessManaPledgeID(created.AccessManaPledgeID()) + outputMetadata.SetConsensusManaPledgeID(created.ConsensusManaPledgeID()) + outputMetadata.SetConfirmationState(confirmation.Confirmed) - l.Storage.outputStorage.Store(created.Output()).Release() - l.Storage.outputMetadataStorage.Store(outputMetadata).Release() - } + l.Storage.outputStorage.Store(created.Output()).Release() + l.Storage.outputMetadataStorage.Store(outputMetadata).Release() } return nil diff --git a/packages/core/ledger/models.go b/packages/core/ledger/models.go index 777ab2cd94..31b0d7c9c4 100644 --- a/packages/core/ledger/models.go +++ b/packages/core/ledger/models.go @@ -484,9 +484,9 @@ func (o *OutputsMetadata) ForEach(callback func(outputMetadata *OutputMetadata) // String returns a human-readable version of the OutputsMetadata. 
func (o *OutputsMetadata) String() (humanReadable string) { - structBuilder := stringify.StructBuilder("OutputsMetadata") + structBuilder := stringify.NewStructBuilder("OutputsMetadata") _ = o.ForEach(func(outputMetadata *OutputMetadata) error { - structBuilder.AddField(stringify.StructField(outputMetadata.ID().String(), outputMetadata)) + structBuilder.AddField(stringify.NewStructField(outputMetadata.ID().String(), outputMetadata)) return nil }) @@ -597,12 +597,12 @@ type outputWithMetadataModel struct { // String returns a human-readable version of the OutputWithMetadata. func (o *OutputWithMetadata) String() string { - structBuilder := stringify.StructBuilder("OutputWithMetadata") - structBuilder.AddField(stringify.StructField("OutputID", o.ID())) - structBuilder.AddField(stringify.StructField("Output", o.Output())) - structBuilder.AddField(stringify.StructField("CreationTime", o.CreationTime())) - structBuilder.AddField(stringify.StructField("ConsensusPledgeID", o.ConsensusManaPledgeID())) - structBuilder.AddField(stringify.StructField("AccessPledgeID", o.AccessManaPledgeID())) + structBuilder := stringify.NewStructBuilder("OutputWithMetadata") + structBuilder.AddField(stringify.NewStructField("OutputID", o.ID())) + structBuilder.AddField(stringify.NewStructField("Output", o.Output())) + structBuilder.AddField(stringify.NewStructField("CreationTime", o.CreationTime())) + structBuilder.AddField(stringify.NewStructField("ConsensusPledgeID", o.ConsensusManaPledgeID())) + structBuilder.AddField(stringify.NewStructField("AccessPledgeID", o.AccessManaPledgeID())) return structBuilder.String() } @@ -612,6 +612,7 @@ func NewOutputWithMetadata(outputID utxo.OutputID, output utxo.Output, creationT new = model.NewStorable[utxo.OutputID, OutputWithMetadata](&outputWithMetadataModel{ OutputID: outputID, Output: output, + CreationTime: creationTime, ConsensusManaPledgeID: consensusManaPledgeID, AccessManaPledgeID: accessManaPledgeID, }) diff --git 
a/packages/core/ledger/snapshot.go b/packages/core/ledger/snapshot.go index 8b54bfc973..3f16744e7a 100644 --- a/packages/core/ledger/snapshot.go +++ b/packages/core/ledger/snapshot.go @@ -8,9 +8,10 @@ import ( // Snapshot represents a snapshot of the current ledger state. type Snapshot struct { - Header *SnapshotHeader `serix:"0"` - OutputsWithMetadata []*OutputWithMetadata `serix:"1,lengthPrefixType=uint32"` - EpochDiffs map[epoch.Index]*EpochDiff `serix:"2,lengthPrefixType=uint32"` + Header *SnapshotHeader `serix:"0"` + OutputsWithMetadata []*OutputWithMetadata `serix:"1,lengthPrefixType=uint32"` + EpochDiffs map[epoch.Index]*EpochDiff `serix:"2,lengthPrefixType=uint32"` + EpochActiveNodes epoch.SnapshotEpochActivity `serix:"3,lengthPrefixType=uint32"` } // SnapshotHeader represents the info of a snapshot. @@ -22,28 +23,30 @@ type SnapshotHeader struct { } // NewSnapshot creates a new Snapshot from the given details. -func NewSnapshot(outputsWithMetadata []*OutputWithMetadata) (new *Snapshot) { +func NewSnapshot(outputsWithMetadata []*OutputWithMetadata, activeNodes epoch.SnapshotEpochActivity) (new *Snapshot) { return &Snapshot{ Header: &SnapshotHeader{OutputWithMetadataCount: uint64(len(outputsWithMetadata))}, OutputsWithMetadata: outputsWithMetadata, + EpochActiveNodes: activeNodes, } } // String returns a human-readable version of the Snapshot. 
func (s *Snapshot) String() (humanReadable string) { - structBuilder := stringify.StructBuilder("Snapshot") - structBuilder.AddField(stringify.StructField("SnapshotHeader", s.Header)) - structBuilder.AddField(stringify.StructField("OutputsWithMetadata", s.OutputsWithMetadata)) - structBuilder.AddField(stringify.StructField("EpochDiffs", s.EpochDiffs)) + structBuilder := stringify.NewStructBuilder("Snapshot") + structBuilder.AddField(stringify.NewStructField("SnapshotHeader", s.Header)) + structBuilder.AddField(stringify.NewStructField("OutputsWithMetadata", s.OutputsWithMetadata)) + structBuilder.AddField(stringify.NewStructField("EpochDiffs", s.EpochDiffs)) + structBuilder.AddField(stringify.NewStructField("EpochActiveNodes", s.EpochActiveNodes)) return structBuilder.String() } -// String returns a human-readable version of the Snapshot. +// String returns a human-readable version of the SnapshotHeader. func (h *SnapshotHeader) String() (humanReadable string) { return stringify.Struct("SnapshotHeader", - stringify.StructField("OutputWithMetadataCount", h.OutputWithMetadataCount), - stringify.StructField("FullEpochIndex", h.FullEpochIndex), - stringify.StructField("DiffEpochIndex", h.DiffEpochIndex), - stringify.StructField("LatestECRecord", h.LatestECRecord), + stringify.NewStructField("OutputWithMetadataCount", h.OutputWithMetadataCount), + stringify.NewStructField("FullEpochIndex", h.FullEpochIndex), + stringify.NewStructField("DiffEpochIndex", h.DiffEpochIndex), + stringify.NewStructField("LatestECRecord", h.LatestECRecord), ) } diff --git a/packages/core/ledger/testframework.go b/packages/core/ledger/testframework.go index 51ff8d054b..e4a2d9c7e7 100644 --- a/packages/core/ledger/testframework.go +++ b/packages/core/ledger/testframework.go @@ -340,7 +340,7 @@ func NewMockedInput(outputID utxo.OutputID) (new *MockedInput) { // String returns a human-readable version of the MockedInput. 
func (m *MockedInput) String() (humanReadable string) { return stringify.Struct("MockedInput", - stringify.StructField("OutputID", m.OutputID), + stringify.NewStructField("OutputID", m.OutputID), ) } diff --git a/packages/core/ledger/utxo/types.go b/packages/core/ledger/utxo/types.go index 3ea0b76449..5106d18c2e 100644 --- a/packages/core/ledger/utxo/types.go +++ b/packages/core/ledger/utxo/types.go @@ -237,9 +237,9 @@ func (o *Outputs) ForEach(callback func(output Output) error) (err error) { // Strings returns a human-readable version of the Outputs. func (o *Outputs) String() (humanReadable string) { - structBuilder := stringify.StructBuilder("Outputs") + structBuilder := stringify.NewStructBuilder("Outputs") _ = o.ForEach(func(output Output) error { - structBuilder.AddField(stringify.StructField(output.ID().String(), output)) + structBuilder.AddField(stringify.NewStructField(output.ID().String(), output)) return nil }) diff --git a/packages/core/ledger/vm/devnetvm/address.go b/packages/core/ledger/vm/devnetvm/address.go index 81135ddab2..56d7d40ff1 100644 --- a/packages/core/ledger/vm/devnetvm/address.go +++ b/packages/core/ledger/vm/devnetvm/address.go @@ -220,8 +220,8 @@ func (e *ED25519Address) Base58() string { // String returns a human readable version of the addresses for debug purposes. func (e *ED25519Address) String() string { return stringify.Struct("ED25519Address", - stringify.StructField("Digest", e.Digest()), - stringify.StructField("Base58", e.Base58()), + stringify.NewStructField("Digest", e.Digest()), + stringify.NewStructField("Base58", e.Base58()), ) } @@ -310,8 +310,8 @@ func (b *BLSAddress) Base58() string { // String returns a human readable version of the addresses for debug purposes. 
func (b *BLSAddress) String() string { return stringify.Struct("BLSAddress", - stringify.StructField("Digest", b.Digest()), - stringify.StructField("Base58", b.Base58()), + stringify.NewStructField("Digest", b.Digest()), + stringify.NewStructField("Base58", b.Base58()), ) } @@ -415,8 +415,8 @@ func (a *AliasAddress) Base58() string { // String returns a human readable version of the addresses for debug purposes. func (a *AliasAddress) String() string { return stringify.Struct("AliasAddress", - stringify.StructField("Digest", a.Digest()), - stringify.StructField("Base58", a.Base58()), + stringify.NewStructField("Digest", a.Digest()), + stringify.NewStructField("Base58", a.Base58()), ) } diff --git a/packages/core/ledger/vm/devnetvm/color.go b/packages/core/ledger/vm/devnetvm/color.go index 89ba55807c..3f331087de 100644 --- a/packages/core/ledger/vm/devnetvm/color.go +++ b/packages/core/ledger/vm/devnetvm/color.go @@ -184,9 +184,9 @@ func (c *ColoredBalances) Map() (balances map[Color]uint64) { // String returns a human-readable version of the ColoredBalances. func (c *ColoredBalances) String() string { - structBuilder := stringify.StructBuilder("ColoredBalances") + structBuilder := stringify.NewStructBuilder("ColoredBalances") c.ForEach(func(color Color, balance uint64) bool { - structBuilder.AddField(stringify.StructField(color.String(), balance)) + structBuilder.AddField(stringify.NewStructField(color.String(), balance)) return true }) diff --git a/packages/core/ledger/vm/devnetvm/input.go b/packages/core/ledger/vm/devnetvm/input.go index 9ef9e38c5e..a24181fc94 100644 --- a/packages/core/ledger/vm/devnetvm/input.go +++ b/packages/core/ledger/vm/devnetvm/input.go @@ -135,9 +135,9 @@ func (i Inputs) Clone() (clonedInputs Inputs) { // String returns a human-readable version of the Inputs. 
func (i Inputs) String() string { - structBuilder := stringify.StructBuilder("Inputs") + structBuilder := stringify.NewStructBuilder("Inputs") for i, input := range i { - structBuilder.AddField(stringify.StructField(strconv.Itoa(i), input)) + structBuilder.AddField(stringify.NewStructField(strconv.Itoa(i), input)) } return structBuilder.String() diff --git a/packages/core/ledger/vm/devnetvm/output.go b/packages/core/ledger/vm/devnetvm/output.go index 1ef588e257..1ccb8e35b8 100644 --- a/packages/core/ledger/vm/devnetvm/output.go +++ b/packages/core/ledger/vm/devnetvm/output.go @@ -250,9 +250,9 @@ func (o Outputs) Filter(condition func(output Output) bool) (filteredOutputs Out // String returns a human-readable version of the Outputs. func (o Outputs) String() string { - structBuilder := stringify.StructBuilder("Outputs") + structBuilder := stringify.NewStructBuilder("Outputs") for i, output := range o { - structBuilder.AddField(stringify.StructField(strconv.Itoa(i), output)) + structBuilder.AddField(stringify.NewStructField(strconv.Itoa(i), output)) } return structBuilder.String() @@ -316,9 +316,9 @@ func (o OutputsByID) Clone() (clonedOutputs OutputsByID) { // String returns a human readable version of the OutputsByID. func (o OutputsByID) String() string { - structBuilder := stringify.StructBuilder("OutputsByID") + structBuilder := stringify.NewStructBuilder("OutputsByID") for id, output := range o { - structBuilder.AddField(stringify.StructField(id.String(), output)) + structBuilder.AddField(stringify.NewStructField(id.String(), output)) } return structBuilder.String() @@ -1914,12 +1914,12 @@ func (o *ExtendedLockedOutput) Compare(other Output) int { // String returns a human readable version of the Output. 
func (o *ExtendedLockedOutput) String() string { return stringify.Struct("ExtendedLockedOutput", - stringify.StructField("id", o.ID()), - stringify.StructField("address", o.address), - stringify.StructField("balances", o.balances), - stringify.StructField("fallbackAddress", o.fallbackAddress), - stringify.StructField("fallbackDeadline", o.fallbackDeadline), - stringify.StructField("timelock", o.timelock), + stringify.NewStructField("id", o.ID()), + stringify.NewStructField("address", o.address), + stringify.NewStructField("balances", o.balances), + stringify.NewStructField("fallbackAddress", o.fallbackAddress), + stringify.NewStructField("fallbackDeadline", o.fallbackDeadline), + stringify.NewStructField("timelock", o.timelock), ) } diff --git a/packages/core/ledger/vm/devnetvm/signature.go b/packages/core/ledger/vm/devnetvm/signature.go index c71b4426c1..90d21c588a 100644 --- a/packages/core/ledger/vm/devnetvm/signature.go +++ b/packages/core/ledger/vm/devnetvm/signature.go @@ -190,8 +190,8 @@ func (e *ED25519Signature) Base58() string { // String returns a human readable version of the Signature. func (e *ED25519Signature) String() string { return stringify.Struct("ED25519Signature", - stringify.StructField("publicKey", e.PublicKey), - stringify.StructField("signature", e.Signature), + stringify.NewStructField("publicKey", e.PublicKey), + stringify.NewStructField("signature", e.Signature), ) } @@ -266,8 +266,8 @@ func (b *BLSSignature) Base58() string { // String returns a human readable version of the Signature. 
func (b *BLSSignature) String() string { return stringify.Struct("BLSSignature", - stringify.StructField("publicKey", b.Signature.PublicKey), - stringify.StructField("signature", b.Signature.Signature), + stringify.NewStructField("publicKey", b.Signature.PublicKey), + stringify.NewStructField("signature", b.Signature.Signature), ) } diff --git a/packages/core/ledger/vm/devnetvm/unlockblock.go b/packages/core/ledger/vm/devnetvm/unlockblock.go index 789f7b6567..6af12910a9 100644 --- a/packages/core/ledger/vm/devnetvm/unlockblock.go +++ b/packages/core/ledger/vm/devnetvm/unlockblock.go @@ -110,9 +110,9 @@ func (u UnlockBlocks) Bytes() []byte { // String returns a human readable version of the UnlockBlocks. func (u UnlockBlocks) String() string { - structBuilder := stringify.StructBuilder("UnlockBlocks") + structBuilder := stringify.NewStructBuilder("UnlockBlocks") for i, unlockBlock := range u { - structBuilder.AddField(stringify.StructField(strconv.Itoa(i), unlockBlock)) + structBuilder.AddField(stringify.NewStructField(strconv.Itoa(i), unlockBlock)) } return structBuilder.String() diff --git a/packages/core/mana/events.go b/packages/core/mana/events.go index 5c14207825..8f7a54b243 100644 --- a/packages/core/mana/events.go +++ b/packages/core/mana/events.go @@ -90,12 +90,12 @@ func (p *PledgedEvent) ToJSONSerializable() interface{} { // String returns a human readable version of the event. 
func (p *PledgedEvent) String() string { return stringify.Struct("PledgeEvent", - stringify.StructField("type", p.ManaType.String()), - stringify.StructField("shortNodeID", p.NodeID.String()), - stringify.StructField("fullNodeID", base58.Encode(p.NodeID.Bytes())), - stringify.StructField("time", p.Time.String()), - stringify.StructField("amount", p.Amount), - stringify.StructField("txID", p.TransactionID), + stringify.NewStructField("type", p.ManaType.String()), + stringify.NewStructField("shortNodeID", p.NodeID.String()), + stringify.NewStructField("fullNodeID", base58.Encode(p.NodeID.Bytes())), + stringify.NewStructField("time", p.Time.String()), + stringify.NewStructField("amount", p.Amount), + stringify.NewStructField("txID", p.TransactionID), ) } @@ -184,13 +184,13 @@ func (r *RevokedEvent) ToJSONSerializable() interface{} { // String returns a human readable version of the event. func (r *RevokedEvent) String() string { return stringify.Struct("RevokedEvent", - stringify.StructField("type", r.ManaType.String()), - stringify.StructField("shortNodeID", r.NodeID.String()), - stringify.StructField("fullNodeID", base58.Encode(r.NodeID.Bytes())), - stringify.StructField("time", r.Time.String()), - stringify.StructField("amount", r.Amount), - stringify.StructField("txID", r.TransactionID), - stringify.StructField("inputID", r.InputID), + stringify.NewStructField("type", r.ManaType.String()), + stringify.NewStructField("shortNodeID", r.NodeID.String()), + stringify.NewStructField("fullNodeID", base58.Encode(r.NodeID.Bytes())), + stringify.NewStructField("time", r.Time.String()), + stringify.NewStructField("amount", r.Amount), + stringify.NewStructField("txID", r.TransactionID), + stringify.NewStructField("inputID", r.InputID), ) } @@ -257,11 +257,11 @@ func (u *UpdatedEvent) ToJSONSerializable() interface{} { // String returns a human readable version of the event. 
func (u *UpdatedEvent) String() string { return stringify.Struct("UpdatedEvent", - stringify.StructField("type", u.ManaType.String()), - stringify.StructField("shortNodeID", u.NodeID.String()), - stringify.StructField("fullNodeID", base58.Encode(u.NodeID.Bytes())), - stringify.StructField("oldBaseMana", u.OldMana), - stringify.StructField("newBaseMana", u.NewMana), + stringify.NewStructField("type", u.ManaType.String()), + stringify.NewStructField("shortNodeID", u.NodeID.String()), + stringify.NewStructField("fullNodeID", base58.Encode(u.NodeID.Bytes())), + stringify.NewStructField("oldBaseMana", u.OldMana), + stringify.NewStructField("newBaseMana", u.NewMana), ) } diff --git a/packages/core/mana/txinfo.go b/packages/core/mana/txinfo.go index f40d4e79de..e9a7225875 100644 --- a/packages/core/mana/txinfo.go +++ b/packages/core/mana/txinfo.go @@ -81,8 +81,8 @@ func (s *SnapshotNode) Bytes() (serialized []byte) { // String returns a human-readable version of the SnapshotNode. func (s *SnapshotNode) String() (humanReadable string) { return stringify.Struct("SnapshotNode", - stringify.StructField("AccessMana", s.AccessMana), - stringify.StructField("SortedTxSnapshot", s.SortedTxSnapshot), + stringify.NewStructField("AccessMana", s.AccessMana), + stringify.NewStructField("SortedTxSnapshot", s.SortedTxSnapshot), ) } @@ -115,8 +115,8 @@ func (a *AccessManaSnapshot) Bytes() (serialized []byte) { // String returns a human-readable version of the AccessManaSnapshot. func (a AccessManaSnapshot) String() (humanReadable string) { return stringify.Struct("AccessManaSnapshot", - stringify.StructField("Value", a.Value), - stringify.StructField("Timestamp", a.Timestamp), + stringify.NewStructField("Value", a.Value), + stringify.NewStructField("Timestamp", a.Timestamp), ) } @@ -159,9 +159,9 @@ func (t *TxSnapshot) Bytes() (serialized []byte) { // String returns a human-readable version of the TxSnapshot. 
func (t *TxSnapshot) String() (humanReadable string) { return stringify.Struct("TxSnapshot", - stringify.StructField("Value", t.Value), - stringify.StructField("TxID", t.TxID), - stringify.StructField("Timestamp", t.Timestamp), + stringify.NewStructField("Value", t.Value), + stringify.NewStructField("TxID", t.TxID), + stringify.NewStructField("Timestamp", t.Timestamp), ) } diff --git a/packages/core/mana/types.go b/packages/core/mana/types.go index 0ab4ca0b22..ff91548c0c 100644 --- a/packages/core/mana/types.go +++ b/packages/core/mana/types.go @@ -1,6 +1,6 @@ -package mana // Type defines if mana is access or consensus type of mana. +package mana -// Type is the mana type. +// Type defines if mana is access or consensus type of mana. type Type byte const ( diff --git a/packages/core/markers/manager_test.go b/packages/core/markers/manager_test.go index 6f9a40337f..06aaf53a13 100644 --- a/packages/core/markers/manager_test.go +++ b/packages/core/markers/manager_test.go @@ -399,8 +399,8 @@ func newBlock(id string, parents ...string) *block { func (m *block) String() string { return stringify.Struct("block", - stringify.StructField("id", m.id), - stringify.StructField("forceNewMarker", m.forceNewMarker), - stringify.StructField("parents", m.parents), + stringify.NewStructField("id", m.id), + stringify.NewStructField("forceNewMarker", m.forceNewMarker), + stringify.NewStructField("parents", m.parents), ) } diff --git a/packages/core/markers/models.go b/packages/core/markers/models.go index 8dfd171ea9..f1232e884c 100644 --- a/packages/core/markers/models.go +++ b/packages/core/markers/models.go @@ -409,14 +409,14 @@ func (r *ReferencingMarkers) String() (humanReadableReferencingMarkers string) { } thresholdStart := "0" - referencingMarkers := stringify.StructBuilder("ReferencingMarkers") + referencingMarkers := stringify.NewStructBuilder("ReferencingMarkers") for _, index := range indexes { thresholdEnd := strconv.FormatUint(uint64(index), 10) if thresholdStart == 
thresholdEnd { - referencingMarkers.AddField(stringify.StructField("Index("+thresholdStart+")", referencingMarkersByReferencingIndex[index])) + referencingMarkers.AddField(stringify.NewStructField("Index("+thresholdStart+")", referencingMarkersByReferencingIndex[index])) } else { - referencingMarkers.AddField(stringify.StructField("Index("+thresholdStart+" ... "+thresholdEnd+")", referencingMarkersByReferencingIndex[index])) + referencingMarkers.AddField(stringify.NewStructField("Index("+thresholdStart+" ... "+thresholdEnd+")", referencingMarkersByReferencingIndex[index])) } thresholdStart = strconv.FormatUint(uint64(index)+1, 10) @@ -530,7 +530,7 @@ func (r *ReferencedMarkers) String() (humanReadableReferencedMarkers string) { } } - referencedMarkers := stringify.StructBuilder("ReferencedMarkers") + referencedMarkers := stringify.NewStructBuilder("ReferencedMarkers") for i, index := range indexes { thresholdStart := strconv.FormatUint(uint64(index), 10) thresholdEnd := "INF" @@ -539,9 +539,9 @@ func (r *ReferencedMarkers) String() (humanReadableReferencedMarkers string) { } if thresholdStart == thresholdEnd { - referencedMarkers.AddField(stringify.StructField("Index("+thresholdStart+")", referencedMarkersByReferencingIndex[index])) + referencedMarkers.AddField(stringify.NewStructField("Index("+thresholdStart+")", referencedMarkersByReferencingIndex[index])) } else { - referencedMarkers.AddField(stringify.StructField("Index("+thresholdStart+" ... "+thresholdEnd+")", referencedMarkersByReferencingIndex[index])) + referencedMarkers.AddField(stringify.NewStructField("Index("+thresholdStart+" ... 
"+thresholdEnd+")", referencedMarkersByReferencingIndex[index])) } } diff --git a/packages/core/notarization/commitments.go b/packages/core/notarization/commitments.go index 2347942b42..020335b9b9 100644 --- a/packages/core/notarization/commitments.go +++ b/packages/core/notarization/commitments.go @@ -3,10 +3,10 @@ package notarization import ( "context" - "github.com/iotaledger/hive.go/core/serix" - "github.com/celestiaorg/smt" "github.com/cockroachdb/errors" + "github.com/iotaledger/hive.go/core/identity" + "github.com/iotaledger/hive.go/core/serix" "github.com/iotaledger/goshimmer/packages/core/epoch" "github.com/iotaledger/goshimmer/packages/core/ledger" @@ -23,7 +23,7 @@ import ( "github.com/iotaledger/goshimmer/packages/core/tangleold" ) -// region Committment types //////////////////////////////////////////////////////////////////////////////////////////// +// region Commitment types //////////////////////////////////////////////////////////////////////////////////////////// // CommitmentRoots contains roots of trees of an epoch. type CommitmentRoots struct { @@ -32,6 +32,7 @@ type CommitmentRoots struct { stateMutationRoot epoch.MerkleRoot stateRoot epoch.MerkleRoot manaRoot epoch.MerkleRoot + activityRoot epoch.MerkleRoot } // CommitmentTrees is a compressed form of all the information (blocks and confirmed value payloads) of an epoch. @@ -39,6 +40,7 @@ type CommitmentTrees struct { EI epoch.Index tangleTree *smt.SparseMerkleTree stateMutationTree *smt.SparseMerkleTree + activityTree *smt.SparseMerkleTree } // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -54,7 +56,7 @@ type EpochCommitmentFactory struct { // stateRootTree stores the state tree at the LastCommittedEpoch. stateRootTree *smt.SparseMerkleTree - // manaRootTree stores the mana tree at the LastCommittedEpoch + 1. + // manaRootTree stores the mana tree at the LastCommittedEpoch. 
manaRootTree *smt.SparseMerkleTree // snapshotDepth defines how far back the ledgerstate is kept with respect to the latest committed epoch. @@ -91,34 +93,17 @@ func (f *EpochCommitmentFactory) ManaRoot() []byte { return f.manaRootTree.Root() } -// ECR retrieves the epoch commitment root. -func (f *EpochCommitmentFactory) ECR(ei epoch.Index) (ecr epoch.ECR, err error) { - epochRoots, err := f.newEpochRoots(ei) +// ECRandRoots retrieves the epoch commitment root. +func (f *EpochCommitmentFactory) ECRandRoots(ei epoch.Index) (ecr epoch.ECR, roots *epoch.CommitmentRoots, err error) { + roots, err = f.newEpochRoots(ei) if err != nil { - return epoch.MerkleRoot{}, errors.Wrap(err, "ECR could not be created") + return epoch.MerkleRoot{}, nil, errors.Wrap(err, "ECR could not be created") } - root := make([]byte, 0) - conflict1 := make([]byte, 0) - conflict2 := make([]byte, 0) - - conflict1Hashed := blake2b.Sum256(append(append(conflict1, epochRoots.tangleRoot[:]...), epochRoots.stateMutationRoot[:]...)) - conflict2Hashed := blake2b.Sum256(append(append(conflict2, epochRoots.stateRoot[:]...), epochRoots.manaRoot[:]...)) - rootHashed := blake2b.Sum256(append(append(root, conflict1Hashed[:]...), conflict2Hashed[:]...)) - - return epoch.NewMerkleRoot(rootHashed[:]), nil + return epoch.ComputeECR(roots.TangleRoot, roots.StateMutationRoot, roots.StateRoot, roots.ManaRoot), roots, nil } -// InsertStateLeaf inserts the outputID to the state sparse merkle tree. -func (f *EpochCommitmentFactory) insertStateLeaf(outputID utxo.OutputID) error { - _, err := f.stateRootTree.Update(outputID.Bytes(), outputID.Bytes()) - if err != nil { - return errors.Wrap(err, "could not insert leaf to the state tree") - } - return nil -} - -// RemoveStateLeaf removes the output ID from the ledger sparse merkle tree. +// removeStateLeaf removes the output ID from the ledger sparse merkle tree. 
func (f *EpochCommitmentFactory) removeStateLeaf(outputID utxo.OutputID) error { exists, _ := f.stateRootTree.Has(outputID.Bytes()) if exists { @@ -130,7 +115,7 @@ func (f *EpochCommitmentFactory) removeStateLeaf(outputID utxo.OutputID) error { return nil } -// UpdateManaLeaf updates the mana balance in the mana sparse merkle tree. +// updateManaLeaf updates the mana balance in the mana sparse merkle tree. func (f *EpochCommitmentFactory) updateManaLeaf(outputWithMetadata *ledger.OutputWithMetadata, isCreated bool) (err error) { outputBalance, exists := outputWithMetadata.Output().(devnetvm.Output).Balances().Get(devnetvm.ColorIOTA) if !exists { @@ -155,10 +140,7 @@ func (f *EpochCommitmentFactory) updateManaLeaf(outputWithMetadata *ledger.Outpu // remove leaf if mana is zero if currentBalance <= 0 { - if _, deleteLeafErr := f.manaRootTree.Delete(accountBytes); deleteLeafErr != nil { - return errors.Wrap(deleteLeafErr, "could not delete leaf from mana tree") - } - return nil + return removeLeaf(f.manaRootTree, accountBytes) } encodedBalanceBytes, encodeErr := serix.DefaultAPI.Encode(context.Background(), currentBalance, serix.WithValidation()) @@ -166,66 +148,61 @@ func (f *EpochCommitmentFactory) updateManaLeaf(outputWithMetadata *ledger.Outpu return errors.Wrap(encodeErr, "could not encode mana leaf balance") } - if _, updateLeafErr := f.manaRootTree.Update(accountBytes, encodedBalanceBytes); updateLeafErr != nil { - return errors.Wrap(updateLeafErr, "could not update mana tree leaf") - } - - return nil + return insertLeaf(f.manaRootTree, accountBytes, encodedBalanceBytes) } -// InsertStateMutationLeaf inserts the transaction ID to the state mutation sparse merkle tree. +// insertStateMutationLeaf inserts the transaction ID to the state mutation sparse merkle tree. 
func (f *EpochCommitmentFactory) insertStateMutationLeaf(ei epoch.Index, txID utxo.TransactionID) error { commitment, err := f.getCommitmentTrees(ei) if err != nil { return errors.Wrap(err, "could not get commitment while inserting state mutation leaf") } - _, err = commitment.stateMutationTree.Update(txID.Bytes(), txID.Bytes()) - if err != nil { - return errors.Wrap(err, "could not insert leaf to the state mutation tree") - } - return nil + return insertLeaf(commitment.stateMutationTree, txID.Bytes(), txID.Bytes()) } -// RemoveStateMutationLeaf deletes the transaction ID to the state mutation sparse merkle tree. +// removeStateMutationLeaf deletes the transaction ID to the state mutation sparse merkle tree. func (f *EpochCommitmentFactory) removeStateMutationLeaf(ei epoch.Index, txID utxo.TransactionID) error { commitment, err := f.getCommitmentTrees(ei) if err != nil { return errors.Wrap(err, "could not get commitment while deleting state mutation leaf") } - _, err = commitment.stateMutationTree.Delete(txID.Bytes()) - if err != nil { - return errors.Wrap(err, "could not delete leaf from the state mutation tree") - } - return nil + return removeLeaf(commitment.stateMutationTree, txID.Bytes()) } -// InsertTangleLeaf inserts blk to the Tangle sparse merkle tree. +// insertTangleLeaf inserts blk to the Tangle sparse merkle tree. func (f *EpochCommitmentFactory) insertTangleLeaf(ei epoch.Index, blkID tangleold.BlockID) error { commitment, err := f.getCommitmentTrees(ei) if err != nil { return errors.Wrap(err, "could not get commitment while inserting tangle leaf") } - _, err = commitment.tangleTree.Update(blkID.Bytes(), blkID.Bytes()) - if err != nil { - return errors.Wrap(err, "could not insert leaf to the tangle tree") - } - return nil + return insertLeaf(commitment.tangleTree, blkID.Bytes(), blkID.Bytes()) } -// RemoveTangleLeaf removes the block ID from the Tangle sparse merkle tree. +// removeTangleLeaf removes the block ID from the Tangle sparse merkle tree. 
func (f *EpochCommitmentFactory) removeTangleLeaf(ei epoch.Index, blkID tangleold.BlockID) error { commitment, err := f.getCommitmentTrees(ei) if err != nil { return errors.Wrap(err, "could not get commitment while deleting tangle leaf") } - exists, _ := commitment.tangleTree.Has(blkID.Bytes()) - if exists { - _, err2 := commitment.tangleTree.Delete(blkID.Bytes()) - if err2 != nil { - return errors.Wrap(err, "could not delete leaf from the tangle tree") - } + return removeLeaf(commitment.tangleTree, blkID.Bytes()) +} + +// insertActivityLeaf inserts nodeID to the Activity sparse merkle tree. +func (f *EpochCommitmentFactory) insertActivityLeaf(ei epoch.Index, nodeID identity.ID, acceptedInc ...uint64) error { + commitment, err := f.getCommitmentTrees(ei) + if err != nil { + return errors.Wrap(err, "could not get commitment while inserting activity leaf") } - return nil + return insertLeaf(commitment.activityTree, nodeID.Bytes(), nodeID.Bytes()) +} + +// removeActivityLeaf removes the nodeID from the Activity sparse merkle tree. +func (f *EpochCommitmentFactory) removeActivityLeaf(ei epoch.Index, nodeID identity.ID) error { + commitment, err := f.getCommitmentTrees(ei) + if err != nil { + return errors.Wrap(err, "could not get commitment while deleting activity leaf") + } + return removeLeaf(commitment.activityTree, nodeID.Bytes()) } // ecRecord retrieves the epoch commitment. @@ -235,7 +212,7 @@ func (f *EpochCommitmentFactory) ecRecord(ei epoch.Index) (ecRecord *epoch.ECRec return ecRecord, nil } // We never committed this epoch before, create and roll to a new epoch. - ecr, ecrErr := f.ECR(ei) + ecr, roots, ecrErr := f.ECRandRoots(ei) if ecrErr != nil { return nil, ecrErr } @@ -247,7 +224,8 @@ func (f *EpochCommitmentFactory) ecRecord(ei epoch.Index) (ecRecord *epoch.ECRec // Store and return. 
f.storage.CachedECRecord(ei, epoch.NewECRecord).Consume(func(e *epoch.ECRecord) { e.SetECR(ecr) - e.SetPrevEC(EC(prevECRecord)) + e.SetRoots(roots) + e.SetPrevEC(prevECRecord.ComputeEC()) ecRecord = e }) @@ -258,6 +236,7 @@ func (f *EpochCommitmentFactory) loadECRecord(ei epoch.Index) (ecRecord *epoch.E f.storage.CachedECRecord(ei).Consume(func(record *epoch.ECRecord) { ecRecord = epoch.NewECRecord(ei) ecRecord.SetECR(record.ECR()) + ecRecord.SetRoots(record.Roots()) ecRecord.SetPrevEC(record.PrevEC()) }) return @@ -322,9 +301,7 @@ func (f *EpochCommitmentFactory) loadDiffUTXOs(ei epoch.Index) (spent, created [ func (f *EpochCommitmentFactory) loadLedgerState(consumer func(*ledger.OutputWithMetadata)) { f.storage.ledgerstateStorage.ForEach(func(_ []byte, cachedOutputWithMetadata *objectstorage.CachedObject[*ledger.OutputWithMetadata]) bool { - cachedOutputWithMetadata.Consume(func(outputWithMetadata *ledger.OutputWithMetadata) { - consumer(outputWithMetadata) - }) + cachedOutputWithMetadata.Consume(consumer) return true }) @@ -339,18 +316,21 @@ func (f *EpochCommitmentFactory) newCommitmentTrees(ei epoch.Index) *CommitmentT blockValueStore := db.NewStore() stateMutationIDStore := db.NewStore() stateMutationValueStore := db.NewStore() + activityValueStore := db.NewStore() + activityIDStore := db.NewStore() commitmentTrees := &CommitmentTrees{ EI: ei, tangleTree: smt.NewSparseMerkleTree(blockIDStore, blockValueStore, lo.PanicOnErr(blake2b.New256(nil))), stateMutationTree: smt.NewSparseMerkleTree(stateMutationIDStore, stateMutationValueStore, lo.PanicOnErr(blake2b.New256(nil))), + activityTree: smt.NewSparseMerkleTree(activityIDStore, activityValueStore, lo.PanicOnErr(blake2b.New256(nil))), } return commitmentTrees } // newEpochRoots creates a new commitment with the given ei, by advancing the corresponding data structures. 
-func (f *EpochCommitmentFactory) newEpochRoots(ei epoch.Index) (commitmentRoots *CommitmentRoots, commitmentTreesErr error) { +func (f *EpochCommitmentFactory) newEpochRoots(ei epoch.Index) (commitmentRoots *epoch.CommitmentRoots, commitmentTreesErr error) { // TODO: what if a node restarts and we have incomplete trees? commitmentTrees, commitmentTreesErr := f.getCommitmentTrees(ei) if commitmentTreesErr != nil { @@ -366,12 +346,11 @@ func (f *EpochCommitmentFactory) newEpochRoots(ei epoch.Index) (commitmentRoots // We advance the LedgerState to the next epoch. f.commitLedgerState(ei - epoch.Index(f.snapshotDepth)) - commitmentRoots = &CommitmentRoots{ - EI: ei, - stateRoot: epoch.NewMerkleRoot(stateRoot), - manaRoot: epoch.NewMerkleRoot(manaRoot), - tangleRoot: epoch.NewMerkleRoot(commitmentTrees.tangleTree.Root()), - stateMutationRoot: epoch.NewMerkleRoot(commitmentTrees.stateMutationTree.Root()), + commitmentRoots = &epoch.CommitmentRoots{ + StateRoot: epoch.NewMerkleRoot(stateRoot), + ManaRoot: epoch.NewMerkleRoot(manaRoot), + TangleRoot: epoch.NewMerkleRoot(commitmentTrees.tangleTree.Root()), + StateMutationRoot: epoch.NewMerkleRoot(commitmentTrees.stateMutationTree.Root()), } // We are never going to use this epoch's commitment trees again. @@ -418,7 +397,7 @@ func (f *EpochCommitmentFactory) newStateRoots(ei epoch.Index) (stateRoot []byte // Insert created UTXOs into the state tree. for _, created := range createdOutputs { - err = f.insertStateLeaf(created.ID()) + err = insertLeaf(f.stateRootTree, created.ID().Bytes(), created.ID().Bytes()) if err != nil { return nil, nil, errors.Wrap(err, "could not insert the state leaf") } @@ -430,7 +409,7 @@ func (f *EpochCommitmentFactory) newStateRoots(ei epoch.Index) (stateRoot []byte // Remove spent UTXOs from the state tree. 
for _, spent := range spentOutputs { - err = f.removeStateLeaf(spent.ID()) + err = removeLeaf(f.stateRootTree, spent.ID().Bytes()) if err != nil { return nil, nil, errors.Wrap(err, "could not remove state leaf") } @@ -447,16 +426,25 @@ func (f *EpochCommitmentFactory) newStateRoots(ei epoch.Index) (stateRoot []byte // region extra functions ////////////////////////////////////////////////////////////////////////////////////////////// -// EC calculates the epoch commitment hash from the given ECRecord. -func EC(ecRecord *epoch.ECRecord) (ec epoch.EC) { - concatenated := make([]byte, 0) - concatenated = append(concatenated, ecRecord.EI().Bytes()...) - concatenated = append(concatenated, ecRecord.ECR().Bytes()...) - concatenated = append(concatenated, ecRecord.PrevEC().Bytes()...) - - ecHash := blake2b.Sum256(concatenated) +// insertLeaf inserts the outputID to the provided sparse merkle tree. +func insertLeaf(tree *smt.SparseMerkleTree, keyBytes, valueBytes []byte) error { + _, err := tree.Update(keyBytes, valueBytes) + if err != nil { + return errors.Wrap(err, "could not insert leaf to the tree") + } + return nil +} - return epoch.NewMerkleRoot(ecHash[:]) +// removeLeaf inserts the outputID to the provided sparse merkle tree. 
+func removeLeaf(tree *smt.SparseMerkleTree, leaf []byte) error { + exists, _ := tree.Has(leaf) + if exists { + _, err := tree.Delete(leaf) + if err != nil { + return errors.Wrap(err, "could not delete leaf from the tree") + } + } + return nil } // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/packages/core/notarization/events.go b/packages/core/notarization/events.go new file mode 100644 index 0000000000..90514370d1 --- /dev/null +++ b/packages/core/notarization/events.go @@ -0,0 +1,120 @@ +package notarization + +import ( + "github.com/iotaledger/goshimmer/packages/core/epoch" + "github.com/iotaledger/goshimmer/packages/core/ledger" + "github.com/iotaledger/goshimmer/packages/core/ledger/utxo" + "github.com/iotaledger/goshimmer/packages/core/tangleold" + "github.com/iotaledger/hive.go/core/generics/event" + "github.com/iotaledger/hive.go/core/identity" +) + +// region Events /////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Events is a container that acts as a dictionary for the existing events of a notarization manager. +type Events struct { + // EpochCommittable is an event that gets triggered whenever an epoch commitment is committable. + EpochCommittable *event.Event[*EpochCommittableEvent] + // EpochConfirmed is an event that gets triggered whenever an epoch is confirmed. + EpochConfirmed *event.Event[*EpochConfirmedEvent] + // CompetingCommitmentDetected is an event that gets triggered whenever a competing epoch commitment is detected. + CompetingCommitmentDetected *event.Event[*CompetingCommitmentDetectedEvent] + // ManaVectorUpdate is an event that gets triggered whenever the consensus mana vector needs to be updated. + ManaVectorUpdate *event.Event[*ManaVectorUpdateEvent] + // TangleTreeInserted is an event that gets triggered when a Block is inserted into the Tangle smt. 
+ TangleTreeInserted *event.Event[*TangleTreeUpdatedEvent] + // TangleTreeRemoved is an event that gets triggered when a Block is removed from Tangle smt. + TangleTreeRemoved *event.Event[*TangleTreeUpdatedEvent] + // StateMutationTreeInserted is an event that gets triggered when a transaction is inserted into the state mutation smt. + StateMutationTreeInserted *event.Event[*StateMutationTreeUpdatedEvent] + // StateMutationTreeRemoved is an event that gets triggered when a transaction is removed from state mutation smt. + StateMutationTreeRemoved *event.Event[*StateMutationTreeUpdatedEvent] + // UTXOTreeInserted is an event that gets triggered when UTXOs are stored into the UTXO smt. + UTXOTreeInserted *event.Event[*UTXOUpdatedEvent] + // UTXOTreeRemoved is an event that gets triggered when UTXOs are removed from the UTXO smt. + UTXOTreeRemoved *event.Event[*UTXOUpdatedEvent] + // Bootstrapped is an event that gets triggered when a notarization manager has the last committable epoch relatively close to current epoch. + Bootstrapped *event.Event[*BootstrappedEvent] + // SyncRange is an event that gets triggered when an entire range of epochs needs to be requested, validated and solidified + SyncRange *event.Event[*SyncRangeEvent] + // ActivityTreeInserted is an event that gets triggered when nodeID is added to the activity tree. + ActivityTreeInserted *event.Event[*ActivityTreeUpdatedEvent] + // ActivityTreeRemoved is an event that gets triggered when nodeID is removed from activity tree. + ActivityTreeRemoved *event.Event[*ActivityTreeUpdatedEvent] +} + +// TangleTreeUpdatedEvent is a container that acts as a dictionary for the TangleTree inserted/removed event related parameters. +type TangleTreeUpdatedEvent struct { + // EI is the index of the block. + EI epoch.Index + // BlockID is the blockID that inserted/removed to/from the tangle smt. 
+ BlockID tangleold.BlockID +} + +// BootstrappedEvent is an event that gets triggered when a notarization manager has the last committable epoch relatively close to current epoch. +type BootstrappedEvent struct { + // EI is the index of the last commitable epoch + EI epoch.Index +} + +// StateMutationTreeUpdatedEvent is a container that acts as a dictionary for the State mutation tree inserted/removed event related parameters. +type StateMutationTreeUpdatedEvent struct { + // EI is the index of the transaction. + EI epoch.Index + // TransactionID is the transaction ID that inserted/removed to/from the state mutation smt. + TransactionID utxo.TransactionID +} + +// UTXOUpdatedEvent is a container that acts as a dictionary for the UTXO update event related parameters. +type UTXOUpdatedEvent struct { + // EI is the index of updated UTXO. + EI epoch.Index + // Created are the outputs created in a transaction. + Created []*ledger.OutputWithMetadata + // Spent are outputs that is spent in a transaction. + Spent []*ledger.OutputWithMetadata +} + +// EpochCommittableEvent is a container that acts as a dictionary for the EpochCommittable event related parameters. +type EpochCommittableEvent struct { + // EI is the index of committable epoch. + EI epoch.Index + // ECRecord is the ec root of committable epoch. + ECRecord *epoch.ECRecord +} + +// EpochConfirmedEvent is a container that acts as a dictionary for the EpochConfirmed event related parameters. +type EpochConfirmedEvent struct { + // EI is the index of committable epoch. + EI epoch.Index +} + +// CompetingCommitmentDetectedEvent is a container that acts as a dictionary for the CompetingCommitmentDetectedEvent event related parameters. +type CompetingCommitmentDetectedEvent struct { + // Block is the block that contains the competing commitment. + Block *tangleold.Block +} + +// ManaVectorUpdateEvent is a container that acts as a dictionary for the EpochCommittable event related parameters. 
+type ManaVectorUpdateEvent struct { + // EI is the index of committable epoch. + EI epoch.Index +} + +// SyncRangeEvent is a container that acts as a dictionary for the SyncRange event related parameters. +type SyncRangeEvent struct { + StartEI epoch.Index + EndEI epoch.Index + StartEC epoch.EC + EndPrevEC epoch.EC +} + +// ActivityTreeUpdatedEvent is a container that acts as a dictionary for the ActivityTree inserted/removed event related parameters. +type ActivityTreeUpdatedEvent struct { + // EI is the index of the epoch. + EI epoch.Index + // NodeID is the issuer nodeID. + NodeID identity.ID +} + +// endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/packages/core/notarization/manager.go b/packages/core/notarization/manager.go index ce147b987b..9d9c5fa64d 100644 --- a/packages/core/notarization/manager.go +++ b/packages/core/notarization/manager.go @@ -4,9 +4,12 @@ import ( "sync" "time" + "github.com/iotaledger/hive.go/core/identity" + "github.com/cockroachdb/errors" "github.com/iotaledger/hive.go/core/generics/event" + "github.com/iotaledger/hive.go/core/generics/lo" "github.com/iotaledger/hive.go/core/logger" "github.com/iotaledger/goshimmer/packages/core/conflictdag" @@ -65,9 +68,16 @@ func NewManager(epochCommitmentFactory *EpochCommitmentFactory, t *tangleold.Tan EpochCommittable: event.New[*EpochCommittableEvent](), ManaVectorUpdate: event.New[*ManaVectorUpdateEvent](), Bootstrapped: event.New[*BootstrappedEvent](), + SyncRange: event.New[*SyncRangeEvent](), + ActivityTreeInserted: event.New[*ActivityTreeUpdatedEvent](), + ActivityTreeRemoved: event.New[*ActivityTreeUpdatedEvent](), }, } + new.tangle.Storage.Events.BlockStored.Attach(event.NewClosure(func(event *tangleold.BlockStoredEvent) { + new.OnBlockStored(event.Block) + })) + new.tangle.ConfirmationOracle.Events().BlockAccepted.Attach(onlyIfBootstrapped(t.TimeManager, func(event *tangleold.BlockAcceptedEvent) { 
new.OnBlockAccepted(event.Block) })) @@ -100,23 +110,11 @@ func NewManager(epochCommitmentFactory *EpochCommitmentFactory, t *tangleold.Tan new.OnAcceptanceTimeUpdated(event.ATT) })) - return new -} - -func (m *Manager) ReadLockLedger() { - m.epochCommitmentFactoryMutex.RLock() -} - -func (m *Manager) ReadUnlockLedger() { - m.epochCommitmentFactoryMutex.RUnlock() -} - -func (m *Manager) WriteLockLedger() { - m.epochCommitmentFactoryMutex.Lock() -} + new.tangle.Storage.Events.BlockStored.Attach(event.NewClosure(func(event *tangleold.BlockStoredEvent) { + new.OnBlockStored(event.Block) + })) -func (m *Manager) WriteUnlockLedger() { - m.epochCommitmentFactoryMutex.Unlock() + return new } func onlyIfBootstrapped[E any](timeManager *tangleold.TimeManager, handler func(event E)) *event.Closure[E] { @@ -128,14 +126,42 @@ func onlyIfBootstrapped[E any](timeManager *tangleold.TimeManager, handler func( }) } +// StartSnapshot locks the commitment factory and returns the latest ecRecord and last confirmed epoch index. +func (m *Manager) StartSnapshot() (fullEpochIndex epoch.Index, ecRecord *epoch.ECRecord, err error) { + m.epochCommitmentFactoryMutex.RLock() + + latestConfirmedEpoch, err := m.LatestConfirmedEpochIndex() + if err != nil { + return + } + ecRecord = m.epochCommitmentFactory.loadECRecord(latestConfirmedEpoch) + if ecRecord == nil { + err = errors.Errorf("could not get latest commitment") + return + } + + // The snapshottable ledgerstate always sits at latestConfirmedEpoch - snapshotDepth + fullEpochIndex = latestConfirmedEpoch - epoch.Index(m.epochCommitmentFactory.snapshotDepth) + if fullEpochIndex < 0 { + fullEpochIndex = 0 + } + + return +} + +// EndSnapshot unlocks the commitment factory when the snapshotting completes. +func (m *Manager) EndSnapshot() { + m.epochCommitmentFactoryMutex.RUnlock() +} + // LoadOutputsWithMetadata initiates the state and mana trees from a given snapshot. 
func (m *Manager) LoadOutputsWithMetadata(outputsWithMetadatas []*ledger.OutputWithMetadata) { - m.WriteLockLedger() - defer m.WriteUnlockLedger() + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() for _, outputWithMetadata := range outputsWithMetadatas { m.epochCommitmentFactory.storage.ledgerstateStorage.Store(outputWithMetadata).Release() - err := m.epochCommitmentFactory.insertStateLeaf(outputWithMetadata.ID()) + err := insertLeaf(m.epochCommitmentFactory.stateRootTree, outputWithMetadata.ID().Bytes(), outputWithMetadata.ID().Bytes()) if err != nil { m.log.Error(err) } @@ -147,39 +173,39 @@ func (m *Manager) LoadOutputsWithMetadata(outputsWithMetadatas []*ledger.OutputW } // LoadEpochDiffs updates the state tree from a given snapshot. -func (m *Manager) LoadEpochDiffs(header *ledger.SnapshotHeader, epochDiffs map[epoch.Index]*ledger.EpochDiff) { - m.WriteLockLedger() - defer m.WriteUnlockLedger() - - for ei := header.FullEpochIndex + 1; ei <= header.DiffEpochIndex; ei++ { - epochDiff := epochDiffs[ei] - for _, spentOutputWithMetadata := range epochDiff.Spent() { - spentOutputIDBytes := spentOutputWithMetadata.ID().Bytes() - m.epochCommitmentFactory.storage.ledgerstateStorage.Delete(spentOutputIDBytes) - if has, _ := m.epochCommitmentFactory.stateRootTree.Has(spentOutputIDBytes); !has { - panic("epoch diff spends an output not contained in the ledger state") - } - _, err := m.epochCommitmentFactory.stateRootTree.Delete(spentOutputIDBytes) - if err != nil { - panic("could not delete leaf from state root tree") - } +func (m *Manager) LoadEpochDiff(epochDiff *ledger.EpochDiff) { + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() + + for _, spentOutputWithMetadata := range epochDiff.Spent() { + spentOutputIDBytes := spentOutputWithMetadata.ID().Bytes() + if has := m.epochCommitmentFactory.storage.ledgerstateStorage.DeleteIfPresent(spentOutputIDBytes); !has { + panic("epoch diff spends an output 
not contained in the ledger state") } - for _, createdOutputWithMetadata := range epochDiff.Created() { - createdOutputIDBytes := createdOutputWithMetadata.ID().Bytes() - m.epochCommitmentFactory.storage.ledgerstateStorage.Store(createdOutputWithMetadata).Release() - _, err := m.epochCommitmentFactory.stateRootTree.Update(createdOutputIDBytes, createdOutputIDBytes) - if err != nil { - panic("could not update leaf of state root tree") - } + if has, _ := m.epochCommitmentFactory.stateRootTree.Has(spentOutputIDBytes); !has { + panic("epoch diff spends an output not contained in the state tree") + } + _, err := m.epochCommitmentFactory.stateRootTree.Delete(spentOutputIDBytes) + if err != nil { + panic("could not delete leaf from state root tree") + } + } + for _, createdOutputWithMetadata := range epochDiff.Created() { + createdOutputIDBytes := createdOutputWithMetadata.ID().Bytes() + m.epochCommitmentFactory.storage.ledgerstateStorage.Store(createdOutputWithMetadata).Release() + _, err := m.epochCommitmentFactory.stateRootTree.Update(createdOutputIDBytes, createdOutputIDBytes) + if err != nil { + panic("could not update leaf of state root tree") } } + return } // LoadECandEIs initiates the ECRecord, latest committable EI, last confirmed EI and acceptance EI from a given snapshot. func (m *Manager) LoadECandEIs(header *ledger.SnapshotHeader) { - m.WriteLockLedger() - defer m.WriteUnlockLedger() + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() // The last committed epoch index corresponds to the last epoch diff stored in the snapshot. if err := m.epochCommitmentFactory.storage.setLatestCommittableEpochIndex(header.DiffEpochIndex); err != nil { @@ -199,27 +225,43 @@ func (m *Manager) LoadECandEIs(header *ledger.SnapshotHeader) { m.epochCommitmentFactory.storage.ecRecordStorage.Store(header.LatestECRecord).Release() } -// SnapshotEpochDiffs returns the EpochDiffs when a snapshot is created. 
-func (m *Manager) SnapshotEpochDiffs(lastConfirmedEpoch, latestCommittableEpoch epoch.Index) (map[epoch.Index]*ledger.EpochDiff, error) { - epochDiffsMap := make(map[epoch.Index]*ledger.EpochDiff) - for ei := lastConfirmedEpoch + 1; ei <= latestCommittableEpoch; ei++ { - spent, created := m.epochCommitmentFactory.loadDiffUTXOs(ei) - epochDiffsMap[ei] = ledger.NewEpochDiff(spent, created) +// LoadActivityLogs loads activity logs from the snapshot and updates the activity tree. +func (m *Manager) LoadActivityLogs(epochActivity epoch.SnapshotEpochActivity) { + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() + + for ei, nodeActivity := range epochActivity { + for nodeID, acceptedCount := range nodeActivity.NodesLog() { + err := m.epochCommitmentFactory.insertActivityLeaf(ei, nodeID, acceptedCount) + if err != nil { + m.log.Error(err) + } + } } +} + +// SnapshotEpochDiffs returns the EpochDiffs when a snapshot is created. +func (m *Manager) SnapshotEpochDiffs(fullEpochIndex, latestCommitableEpoch epoch.Index, prodChan chan *ledger.EpochDiff, stopChan chan struct{}) { + go func() { + for ei := fullEpochIndex; ei <= latestCommitableEpoch; ei++ { + spent, created := m.epochCommitmentFactory.loadDiffUTXOs(ei) + prodChan <- ledger.NewEpochDiff(spent, created) + } + + close(stopChan) + }() - return epochDiffsMap, nil + return } // SnapshotLedgerState returns the all confirmed OutputsWithMetadata when a snapshot is created. -func (m *Manager) SnapshotLedgerState(lastConfirmedEpoch epoch.Index, prodChan chan *ledger.OutputWithMetadata) { +func (m *Manager) SnapshotLedgerState(prodChan chan *ledger.OutputWithMetadata, stopChan chan struct{}) { + // No need to lock because this is called in the context of a StartSnapshot. 
go func() { m.epochCommitmentFactory.loadLedgerState(func(o *ledger.OutputWithMetadata) { - index := epoch.IndexFromTime(o.CreationTime()) - if index <= lastConfirmedEpoch { - prodChan <- o - } + prodChan <- o }) - close(prodChan) + close(stopChan) }() return @@ -227,8 +269,8 @@ func (m *Manager) SnapshotLedgerState(lastConfirmedEpoch epoch.Index, prodChan c // GetLatestEC returns the latest commitment that a new block should commit to. func (m *Manager) GetLatestEC() (ecRecord *epoch.ECRecord, err error) { - m.ReadLockLedger() - defer m.ReadUnlockLedger() + m.epochCommitmentFactoryMutex.RLock() + defer m.epochCommitmentFactoryMutex.RUnlock() latestCommittableEpoch, err := m.epochCommitmentFactory.storage.latestCommittableEpochIndex() ecRecord = m.epochCommitmentFactory.loadECRecord(latestCommittableEpoch) @@ -240,16 +282,16 @@ func (m *Manager) GetLatestEC() (ecRecord *epoch.ECRecord, err error) { // LatestConfirmedEpochIndex returns the latest epoch index that has been confirmed. func (m *Manager) LatestConfirmedEpochIndex() (epoch.Index, error) { - m.ReadLockLedger() - defer m.ReadUnlockLedger() + m.epochCommitmentFactoryMutex.RLock() + defer m.epochCommitmentFactoryMutex.RUnlock() return m.epochCommitmentFactory.storage.lastConfirmedEpochIndex() } // OnBlockAccepted is the handler for block confirmed event. func (m *Manager) OnBlockAccepted(block *tangleold.Block) { - m.WriteLockLedger() - defer m.WriteUnlockLedger() + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() ei := epoch.IndexFromTime(block.IssuingTime()) @@ -267,10 +309,42 @@ func (m *Manager) OnBlockAccepted(block *tangleold.Block) { m.Events.TangleTreeInserted.Trigger(&TangleTreeUpdatedEvent{EI: ei, BlockID: block.ID()}) } +// OnBlockStored is a handler fo Block stored event that updates the activity log and triggers warpsyncing. 
+func (m *Manager) OnBlockStored(block *tangleold.Block) { + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() + + ei := epoch.IndexFromTime(block.IssuingTime()) + + nodeID := identity.NewID(block.IssuerPublicKey()) + err := m.epochCommitmentFactory.insertActivityLeaf(ei, nodeID) + if err != nil && m.log != nil { + m.log.Error(err) + return + } + m.Events.ActivityTreeInserted.Trigger(&ActivityTreeUpdatedEvent{EI: ei, NodeID: nodeID}) + + blockEI := block.ECRecordEI() + latestCommittableEI := lo.PanicOnErr(m.epochCommitmentFactory.storage.latestCommittableEpochIndex()) + epochDeltaSeconds := time.Duration(int64(blockEI-latestCommittableEI)*epoch.Duration) * time.Second + + m.log.Debugf("block committing to epoch %d stored, latest committable epoch is %d", blockEI, latestCommittableEI) + + // If we are too far behind, we will warpsync + if epochDeltaSeconds > m.options.BootstrapWindow { + m.Events.SyncRange.Trigger(&SyncRangeEvent{ + StartEI: latestCommittableEI, + EndEI: blockEI, + StartEC: m.epochCommitmentFactory.loadECRecord(latestCommittableEI).ComputeEC(), + EndPrevEC: block.PrevEC(), + }) + } +} + // OnBlockOrphaned is the handler for block orphaned event. 
func (m *Manager) OnBlockOrphaned(block *tangleold.Block) { - m.WriteLockLedger() - defer m.WriteUnlockLedger() + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() ei := epoch.IndexFromTime(block.IssuingTime()) if m.isEpochAlreadyCommitted(ei) { @@ -283,7 +357,26 @@ func (m *Manager) OnBlockOrphaned(block *tangleold.Block) { } m.Events.TangleTreeRemoved.Trigger(&TangleTreeUpdatedEvent{EI: ei, BlockID: block.ID()}) + transaction, isTransaction := block.Payload().(utxo.Transaction) + nodeID := identity.NewID(block.IssuerPublicKey()) + + updatedCount := uint64(1) + // if block has been accepted, counter was increased two times, on booking and on acceptance + if m.tangle.ConfirmationOracle.IsBlockConfirmed(block.ID()) { + updatedCount++ + } + + noActivityLeft := m.tangle.WeightProvider.Remove(ei, nodeID, updatedCount) + if noActivityLeft { + err = m.epochCommitmentFactory.removeActivityLeaf(ei, nodeID) + if err != nil && m.log != nil { + m.log.Error(err) + return + } + m.Events.ActivityTreeRemoved.Trigger(&ActivityTreeUpdatedEvent{EI: ei, NodeID: nodeID}) + } + if isTransaction { spent, created := m.resolveOutputs(transaction) m.epochCommitmentFactory.deleteDiffUTXOs(ei, created, spent) @@ -293,8 +386,8 @@ func (m *Manager) OnBlockOrphaned(block *tangleold.Block) { // OnTransactionAccepted is the handler for transaction accepted event. func (m *Manager) OnTransactionAccepted(event *ledger.TransactionAcceptedEvent) { - m.WriteLockLedger() - defer m.WriteUnlockLedger() + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() txID := event.TransactionID @@ -320,8 +413,8 @@ func (m *Manager) OnTransactionAccepted(event *ledger.TransactionAcceptedEvent) // OnTransactionInclusionUpdated is the handler for transaction inclusion updated event. 
func (m *Manager) OnTransactionInclusionUpdated(event *ledger.TransactionInclusionUpdatedEvent) { - m.WriteLockLedger() - defer m.WriteUnlockLedger() + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() oldEpoch := epoch.IndexFromTime(event.PreviousInclusionTime) newEpoch := epoch.IndexFromTime(event.InclusionTime) @@ -359,8 +452,8 @@ func (m *Manager) OnConflictAccepted(conflictID utxo.TransactionID) { // OnConflictConfirmed is the handler for conflict confirmed event. func (m *Manager) onConflictAccepted(conflictID utxo.TransactionID) ([]*EpochCommittableEvent, []*ManaVectorUpdateEvent) { - m.WriteLockLedger() - defer m.WriteUnlockLedger() + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() ei := m.getConflictEI(conflictID) @@ -373,8 +466,8 @@ func (m *Manager) onConflictAccepted(conflictID utxo.TransactionID) ([]*EpochCom // OnConflictCreated is the handler for conflict created event. func (m *Manager) OnConflictCreated(conflictID utxo.TransactionID) { - m.WriteLockLedger() - defer m.WriteUnlockLedger() + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() ei := m.getConflictEI(conflictID) @@ -393,8 +486,8 @@ func (m *Manager) OnConflictRejected(conflictID utxo.TransactionID) { // OnConflictRejected is the handler for conflict created event. func (m *Manager) onConflictRejected(conflictID utxo.TransactionID) ([]*EpochCommittableEvent, []*ManaVectorUpdateEvent) { - m.WriteLockLedger() - defer m.WriteUnlockLedger() + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() ei := m.getConflictEI(conflictID) @@ -413,8 +506,8 @@ func (m *Manager) OnAcceptanceTimeUpdated(newTime time.Time) { // OnAcceptanceTimeUpdated is the handler for time updated event and returns events to be triggered. 
func (m *Manager) onAcceptanceTimeUpdated(newTime time.Time) ([]*EpochCommittableEvent, []*ManaVectorUpdateEvent) { - m.WriteLockLedger() - defer m.WriteUnlockLedger() + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() ei := epoch.IndexFromTime(newTime) currentEpochIndex, err := m.epochCommitmentFactory.storage.acceptanceEpochIndex() @@ -422,6 +515,7 @@ func (m *Manager) onAcceptanceTimeUpdated(newTime time.Time) ([]*EpochCommittabl m.log.Error(errors.Wrap(err, "could not get current epoch index")) return nil, nil } + // moved to the next epoch if ei > currentEpochIndex { err = m.epochCommitmentFactory.storage.setAcceptanceEpochIndex(ei) if err != nil { @@ -433,13 +527,11 @@ func (m *Manager) onAcceptanceTimeUpdated(newTime time.Time) ([]*EpochCommittabl return nil, nil } -// PendingConflictsCount returns the current value of pendingConflictsCount. -func (m *Manager) PendingConflictsCount(ei epoch.Index) (pendingConflictsCount uint64) { - return m.pendingConflictsCounters[ei] -} - // PendingConflictsCountAll returns the current value of pendingConflictsCount per epoch. func (m *Manager) PendingConflictsCountAll() (pendingConflicts map[epoch.Index]uint64) { + m.epochCommitmentFactoryMutex.RLock() + defer m.epochCommitmentFactoryMutex.RUnlock() + pendingConflicts = make(map[epoch.Index]uint64, len(m.pendingConflictsCounters)) for k, v := range m.pendingConflictsCounters { pendingConflicts[k] = v @@ -447,17 +539,26 @@ func (m *Manager) PendingConflictsCountAll() (pendingConflicts map[epoch.Index]u return pendingConflicts } +// GetEpochDiff returns the epoch diff of an epoch. +func (m *Manager) GetEpochDiff(ei epoch.Index) (spent []*ledger.OutputWithMetadata, created []*ledger.OutputWithMetadata) { + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() + spent, created = m.epochCommitmentFactory.loadDiffUTXOs(ei) + return +} + // Bootstrapped returns the current value of pendingConflictsCount per epoch. 
func (m *Manager) Bootstrapped() bool { m.bootstrapMutex.RLock() defer m.bootstrapMutex.RUnlock() + return m.bootstrapped } // Shutdown shuts down the manager's permanent storagee. func (m *Manager) Shutdown() { - m.WriteLockLedger() - defer m.WriteUnlockLedger() + m.epochCommitmentFactoryMutex.Lock() + defer m.epochCommitmentFactoryMutex.Unlock() m.epochCommitmentFactory.storage.shutdown() } @@ -575,15 +676,8 @@ func (m *Manager) resolveOutputs(tx utxo.Transaction) (spentOutputsWithMetadata, } func (m *Manager) manaVectorUpdate(ei epoch.Index) (event *ManaVectorUpdateEvent) { - epochForManaVector := ei - epoch.Index(m.options.ManaEpochDelay) - if epochForManaVector < 1 { - return - } - spent, created := m.epochCommitmentFactory.loadDiffUTXOs(epochForManaVector) return &ManaVectorUpdateEvent{ - EI: ei, - EpochDiffCreated: created, - EpochDiffSpent: spent, + EI: ei, } } @@ -614,7 +708,10 @@ func (m *Manager) moveLatestCommittableEpoch(currentEpoch epoch.Index) ([]*Epoch return nil, nil } - epochCommittableEvents = append(epochCommittableEvents, &EpochCommittableEvent{EI: ei, ECRecord: ecRecord}) + epochCommittableEvents = append(epochCommittableEvents, &EpochCommittableEvent{ + EI: ei, + ECRecord: ecRecord, + }) if manaVectorUpdateEvent := m.manaVectorUpdate(ei); manaVectorUpdateEvent != nil { manaVectorUpdateEvents = append(manaVectorUpdateEvents, manaVectorUpdateEvent) } @@ -640,6 +737,12 @@ func (m *Manager) updateEpochsBootstrapped(ei epoch.Index) { } } +// SnapshotEpochActivity snapshots accepted block counts from activity tree and updates provided SnapshotEpochActivity. 
+func (m *Manager) SnapshotEpochActivity(epochDiffIndex epoch.Index) (epochActivity epoch.SnapshotEpochActivity, err error) { + epochActivity = m.tangle.WeightProvider.SnapshotEpochActivity(epochDiffIndex) + return +} + // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// // region Options ////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -651,7 +754,6 @@ type ManagerOption func(options *ManagerOptions) type ManagerOptions struct { MinCommittableEpochAge time.Duration BootstrapWindow time.Duration - ManaEpochDelay uint Log *logger.Logger } @@ -669,13 +771,6 @@ func BootstrapWindow(d time.Duration) ManagerOption { } } -// ManaDelay specifies the epoch offset for mana vector from the last committable epoch. -func ManaDelay(d uint) ManagerOption { - return func(options *ManagerOptions) { - options.ManaEpochDelay = d - } -} - // Log provides the logger. func Log(log *logger.Logger) ManagerOption { return func(options *ManagerOptions) { @@ -684,76 +779,3 @@ func Log(log *logger.Logger) ManagerOption { } // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// - -// region Events /////////////////////////////////////////////////////////////////////////////////////////////////////// - -// Events is a container that acts as a dictionary for the existing events of a notarization manager. -type Events struct { - // EpochCommittable is an event that gets triggered whenever an epoch commitment is committable. - EpochCommittable *event.Event[*EpochCommittableEvent] - ManaVectorUpdate *event.Event[*ManaVectorUpdateEvent] - // TangleTreeInserted is an event that gets triggered when a Block is inserted into the Tangle smt. - TangleTreeInserted *event.Event[*TangleTreeUpdatedEvent] - // TangleTreeRemoved is an event that gets triggered when a Block is removed from Tangle smt. 
- TangleTreeRemoved *event.Event[*TangleTreeUpdatedEvent] - // StateMutationTreeInserted is an event that gets triggered when a transaction is inserted into the state mutation smt. - StateMutationTreeInserted *event.Event[*StateMutationTreeUpdatedEvent] - // StateMutationTreeRemoved is an event that gets triggered when a transaction is removed from state mutation smt. - StateMutationTreeRemoved *event.Event[*StateMutationTreeUpdatedEvent] - // UTXOTreeInserted is an event that gets triggered when UTXOs are stored into the UTXO smt. - UTXOTreeInserted *event.Event[*UTXOUpdatedEvent] - // UTXOTreeRemoved is an event that gets triggered when UTXOs are removed from the UTXO smt. - UTXOTreeRemoved *event.Event[*UTXOUpdatedEvent] - // Bootstrapped is an event that gets triggered when a notarization manager has the last committable epoch relatively close to current epoch. - Bootstrapped *event.Event[*BootstrappedEvent] -} - -// TangleTreeUpdatedEvent is a container that acts as a dictionary for the TangleTree inserted/removed event related parameters. -type TangleTreeUpdatedEvent struct { - // EI is the index of the block. - EI epoch.Index - // BlockID is the blockID that inserted/removed to/from the tangle smt. - BlockID tangleold.BlockID -} - -// BootstrappedEvent is an event that gets triggered when a notarization manager has the last committable epoch relatively close to current epoch. -type BootstrappedEvent struct { - // EI is the index of the last commitable epoch - EI epoch.Index -} - -// StateMutationTreeUpdatedEvent is a container that acts as a dictionary for the State mutation tree inserted/removed event related parameters. -type StateMutationTreeUpdatedEvent struct { - // EI is the index of the transaction. - EI epoch.Index - // TransactionID is the transaction ID that inserted/removed to/from the state mutation smt. 
- TransactionID utxo.TransactionID -} - -// UTXOUpdatedEvent is a container that acts as a dictionary for the UTXO update event related parameters. -type UTXOUpdatedEvent struct { - // EI is the index of updated UTXO. - EI epoch.Index - // Created are the outputs created in a transaction. - Created []*ledger.OutputWithMetadata - // Spent are outputs that is spent in a transaction. - Spent []*ledger.OutputWithMetadata -} - -// EpochCommittableEvent is a container that acts as a dictionary for the EpochCommittable event related parameters. -type EpochCommittableEvent struct { - // EI is the index of committable epoch. - EI epoch.Index - // ECRecord is the ec root of committable epoch. - ECRecord *epoch.ECRecord -} - -// ManaVectorUpdateEvent is a container that acts as a dictionary for the EpochCommittable event related parameters. -type ManaVectorUpdateEvent struct { - // EI is the index of committable epoch. - EI epoch.Index - EpochDiffCreated []*ledger.OutputWithMetadata - EpochDiffSpent []*ledger.OutputWithMetadata -} - -// endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/packages/core/notarization/manager_test.go b/packages/core/notarization/manager_test.go index dc14096402..6a7ad01193 100644 --- a/packages/core/notarization/manager_test.go +++ b/packages/core/notarization/manager_test.go @@ -77,12 +77,15 @@ func TestManager_GetLatestEC(t *testing.T) { } var weightProvider *tangleold.CManaWeightProvider manaRetrieverMock := func() map[identity.ID]float64 { - weightProvider.Update(time.Now(), nodes["A"].ID()) + ei := epoch.IndexFromTime(time.Now()) + weightProvider.Update(ei, nodes["A"].ID()) return map[identity.ID]float64{ nodes["A"].ID(): 100, } } - weightProvider = tangleold.NewCManaWeightProvider(manaRetrieverMock, time.Now) + confirmedRetrieverFunc := func() epoch.Index { return 0 } + + weightProvider = tangleold.NewCManaWeightProvider(manaRetrieverMock, time.Now, 
confirmedRetrieverFunc) genesisTime := time.Now().Add(-25 * time.Minute) epochDuration := 5 * time.Minute @@ -141,7 +144,8 @@ func TestManager_UpdateTangleTree(t *testing.T) { var weightProvider *tangleold.CManaWeightProvider manaRetrieverMock := func() map[identity.ID]float64 { for _, node := range nodes { - weightProvider.Update(time.Now(), node.ID()) + ei := epoch.IndexFromTime(time.Now()) + weightProvider.Update(ei, node.ID()) } return map[identity.ID]float64{ nodes["A"].ID(): 30, @@ -150,7 +154,8 @@ func TestManager_UpdateTangleTree(t *testing.T) { nodes["D"].ID(): 25, } } - weightProvider = tangleold.NewCManaWeightProvider(manaRetrieverMock, time.Now) + confirmedRetrieverFunc := func() epoch.Index { return 0 } + weightProvider = tangleold.NewCManaWeightProvider(manaRetrieverMock, time.Now, confirmedRetrieverFunc) epochInterval := 1 * time.Second @@ -169,14 +174,14 @@ func TestManager_UpdateTangleTree(t *testing.T) { ecRecord, _, err := testFramework.LatestCommitment() require.NoError(t, err) - EC0 = EC(ecRecord) + EC0 = ecRecord.ComputeEC() // PrevEC of Epoch0 is the empty Merkle Root assert.Equal(t, epoch.MerkleRoot{}, ecRecord.PrevEC()) testFramework.CreateBlock("Block1", tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Genesis"), tangleold.WithIssuer(nodes["A"].PublicKey()), tangleold.WithECRecord(ecRecord)) testFramework.IssueBlocks("Block1").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block1") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } issuingTime = issuingTime.Add(epochInterval) @@ -187,14 +192,14 @@ func TestManager_UpdateTangleTree(t *testing.T) { ecRecord, _, err := testFramework.LatestCommitment() require.NoError(t, err) - assert.Equal(t, EC0, EC(ecRecord)) + assert.Equal(t, EC0, ecRecord.ComputeEC()) // PrevEC of Epoch0 is the empty Merkle Root assert.Equal(t, epoch.MerkleRoot{}, ecRecord.PrevEC()) testFramework.CreateBlock("Block2", 
tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Block1"), tangleold.WithIssuer(nodes["B"].PublicKey()), tangleold.WithECRecord(ecRecord)) testFramework.IssueBlocks("Block2").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block2") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } assertExistenceOfBlock(t, testFramework, notarizationMgr, map[string]bool{ @@ -209,14 +214,14 @@ func TestManager_UpdateTangleTree(t *testing.T) { ecRecord, _, err := testFramework.LatestCommitment() require.NoError(t, err) - assert.Equal(t, EC0, EC(ecRecord)) + assert.Equal(t, EC0, ecRecord.ComputeEC()) // PrevEC of Epoch0 is the empty Merkle Root assert.Equal(t, epoch.MerkleRoot{}, ecRecord.PrevEC()) testFramework.CreateBlock("Block3", tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Block2"), tangleold.WithIssuer(nodes["C"].PublicKey()), tangleold.WithECRecord(ecRecord)) testFramework.IssueBlocks("Block3").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block3") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } assertExistenceOfBlock(t, testFramework, notarizationMgr, map[string]bool{ @@ -231,16 +236,18 @@ func TestManager_UpdateTangleTree(t *testing.T) { ecRecord, _, err := testFramework.LatestCommitment() require.NoError(t, err) - assert.Equal(t, EC0, EC(ecRecord)) + assert.Equal(t, EC0, ecRecord.ComputeEC()) // PrevEC of Epoch0 is the empty Merkle Root assert.Equal(t, epoch.MerkleRoot{}, ecRecord.PrevEC()) event.Loop.WaitUntilAllTasksProcessed() eventHandlerMock.Expect("EpochCommittable", epoch.Index(1)) + eventHandlerMock.Expect("ManaVectorUpdate", epoch.Index(1)) + testFramework.CreateBlock("Block4", tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Block3", "Block2"), tangleold.WithIssuer(nodes["D"].PublicKey()), tangleold.WithECRecord(ecRecord)) testFramework.IssueBlocks("Block4").WaitUntilAllTasksProcessed() 
blk := testFramework.Block("Block4") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } assertExistenceOfBlock(t, testFramework, notarizationMgr, map[string]bool{ @@ -260,7 +267,7 @@ func TestManager_UpdateTangleTree(t *testing.T) { testFramework.IssueBlocks("Block5").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block5") - assert.Equal(t, epoch.Index(1), blk.EI()) + assert.Equal(t, epoch.Index(1), blk.ECRecordEI()) assert.Equal(t, EC0, ecRecord.PrevEC()) } @@ -276,7 +283,8 @@ func TestManager_UpdateStateMutationTree(t *testing.T) { var weightProvider *tangleold.CManaWeightProvider manaRetrieverMock := func() map[identity.ID]float64 { for _, node := range nodes { - weightProvider.Update(time.Now(), node.ID()) + ei := epoch.IndexFromTime(time.Now()) + weightProvider.Update(ei, node.ID()) } return map[identity.ID]float64{ nodes["A"].ID(): 30, @@ -286,7 +294,9 @@ func TestManager_UpdateStateMutationTree(t *testing.T) { nodes["E"].ID(): 10, } } - weightProvider = tangleold.NewCManaWeightProvider(manaRetrieverMock, time.Now) + confirmedRetrieverFunc := func() epoch.Index { return 0 } + + weightProvider = tangleold.NewCManaWeightProvider(manaRetrieverMock, time.Now, confirmedRetrieverFunc) epochInterval := 1 * time.Second @@ -303,12 +313,12 @@ func TestManager_UpdateStateMutationTree(t *testing.T) { ecRecord, _, err := testFramework.LatestCommitment() require.NoError(t, err) - EC0 = EC(ecRecord) + EC0 = ecRecord.ComputeEC() testFramework.CreateBlock("Block1", tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Genesis"), tangleold.WithIssuer(nodes["A"].PublicKey()), tangleold.WithECRecord(ecRecord)) testFramework.IssueBlocks("Block1").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block1") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } issuingTime = issuingTime.Add(epochInterval) @@ -323,7 +333,7 @@ func 
TestManager_UpdateStateMutationTree(t *testing.T) { testFramework.IssueBlocks("Block2").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block2") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } issuingTime = issuingTime.Add(epochInterval) @@ -338,7 +348,7 @@ func TestManager_UpdateStateMutationTree(t *testing.T) { testFramework.IssueBlocks("Block3").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block3") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } issuingTime = issuingTime.Add(epochInterval) @@ -351,11 +361,12 @@ func TestManager_UpdateStateMutationTree(t *testing.T) { require.NoError(t, err) eventHandlerMock.Expect("EpochCommittable", epoch.Index(1)) + eventHandlerMock.Expect("ManaVectorUpdate", epoch.Index(1)) testFramework.CreateBlock("Block4", tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Block3"), tangleold.WithIssuer(nodes["D"].PublicKey()), tangleold.WithECRecord(ecRecord)) testFramework.IssueBlocks("Block4").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block4") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } issuingTime = issuingTime.Add(epochInterval) @@ -366,14 +377,16 @@ func TestManager_UpdateStateMutationTree(t *testing.T) { ecRecord, _, err := testFramework.LatestCommitment() require.NoError(t, err) - EC1 = EC(ecRecord) + EC1 = ecRecord.ComputeEC() eventHandlerMock.Expect("EpochCommittable", epoch.Index(2)) + eventHandlerMock.Expect("ManaVectorUpdate", epoch.Index(2)) + testFramework.CreateBlock("Block5", tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Block4"), tangleold.WithIssuer(nodes["A"].PublicKey()), tangleold.WithInputs("A"), tangleold.WithOutput("C", 500), tangleold.WithECRecord(ecRecord)) testFramework.IssueBlocks("Block5").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block5") - assert.Equal(t, 
epoch.Index(1), blk.EI()) + assert.Equal(t, epoch.Index(1), blk.ECRecordEI()) assert.Equal(t, EC0, ecRecord.PrevEC()) } @@ -383,14 +396,16 @@ func TestManager_UpdateStateMutationTree(t *testing.T) { ecRecord, _, err := testFramework.LatestCommitment() require.NoError(t, err) - EC2 = EC(ecRecord) + EC2 = ecRecord.ComputeEC() + eventHandlerMock.Expect("EpochCommittable", epoch.Index(3)) - eventHandlerMock.Expect("ManaVectorUpdate", epoch.Index(3), []*ledger.OutputWithMetadata{}, []*ledger.OutputWithMetadata{}) + eventHandlerMock.Expect("ManaVectorUpdate", epoch.Index(3)) + testFramework.CreateBlock("Block6", tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Block5"), tangleold.WithIssuer(nodes["E"].PublicKey()), tangleold.WithInputs("B"), tangleold.WithOutput("D", 500), tangleold.WithECRecord(ecRecord)) testFramework.IssueBlocks("Block6").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block6") - assert.Equal(t, epoch.Index(2), blk.EI()) + assert.Equal(t, epoch.Index(2), blk.ECRecordEI()) assert.Equal(t, EC1, ecRecord.PrevEC()) } @@ -405,12 +420,12 @@ func TestManager_UpdateStateMutationTree(t *testing.T) { require.NoError(t, err) eventHandlerMock.Expect("EpochCommittable", epoch.Index(4)) - eventHandlerMock.Expect("ManaVectorUpdate", epoch.Index(4), []*ledger.OutputWithMetadata{}, []*ledger.OutputWithMetadata{}) + eventHandlerMock.Expect("ManaVectorUpdate", epoch.Index(4)) testFramework.CreateBlock("Block7", tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Block6"), tangleold.WithIssuer(nodes["C"].PublicKey()), tangleold.WithInputs("C"), tangleold.WithOutput("E", 500), tangleold.WithECRecord(ecRecord)) testFramework.IssueBlocks("Block7").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block7") - assert.Equal(t, epoch.Index(3), blk.EI()) + assert.Equal(t, epoch.Index(3), blk.ECRecordEI()) assert.Equal(t, EC2, ecRecord.PrevEC()) } @@ -424,7 +439,7 @@ func TestManager_UpdateStateMutationTree(t *testing.T) { 
testFramework.IssueBlocks("Block8").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block8") - assert.Equal(t, epoch.Index(3), blk.EI()) + assert.Equal(t, epoch.Index(3), blk.ECRecordEI()) assertExistenceOfTransaction(t, testFramework, notarizationMgr, map[string]bool{ "Block5": true, "Block6": true, @@ -443,7 +458,8 @@ func TestManager_UpdateStateMutationTreeWithConflict(t *testing.T) { var weightProvider *tangleold.CManaWeightProvider manaRetrieverMock := func() map[identity.ID]float64 { for _, node := range nodes { - weightProvider.Update(time.Now(), node.ID()) + ei := epoch.IndexFromTime(time.Now()) + weightProvider.Update(ei, node.ID()) } return map[identity.ID]float64{ nodes["A"].ID(): 30, @@ -458,8 +474,9 @@ func TestManager_UpdateStateMutationTreeWithConflict(t *testing.T) { // Make Current Epoch be epoch 5 genesisTime := time.Now().Add(-epochInterval * 5) + confirmedRetrieverFunc := func() epoch.Index { return 0 } - weightProvider = tangleold.NewCManaWeightProvider(manaRetrieverMock, time.Now) + weightProvider = tangleold.NewCManaWeightProvider(manaRetrieverMock, time.Now, confirmedRetrieverFunc) testFramework, eventHandlerMock, notarizationMgr := setupFramework(t, genesisTime, epochInterval, epochInterval*2, tangleold.ApprovalWeights(weightProvider), tangleold.WithConflictDAGOptions(conflictdag.WithMergeToMaster(false))) issuingTime := genesisTime @@ -474,7 +491,7 @@ func TestManager_UpdateStateMutationTreeWithConflict(t *testing.T) { testFramework.IssueBlocks("Block1").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block1") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block2, issuing time epoch 1 { @@ -486,7 +503,7 @@ func TestManager_UpdateStateMutationTreeWithConflict(t *testing.T) { testFramework.IssueBlocks("Block2").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block2") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) 
} // Block3, issuing time epoch 1 { @@ -498,7 +515,7 @@ func TestManager_UpdateStateMutationTreeWithConflict(t *testing.T) { testFramework.IssueBlocks("Block3").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block3") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block4, issuing time epoch 1 { @@ -510,7 +527,7 @@ func TestManager_UpdateStateMutationTreeWithConflict(t *testing.T) { testFramework.IssueBlocks("Block4").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block4") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } issuingTime = issuingTime.Add(epochInterval) @@ -525,7 +542,7 @@ func TestManager_UpdateStateMutationTreeWithConflict(t *testing.T) { testFramework.IssueBlocks("Block5").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block5") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block6 TX2, issuing time epoch 2 { @@ -537,7 +554,7 @@ func TestManager_UpdateStateMutationTreeWithConflict(t *testing.T) { testFramework.IssueBlocks("Block6").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block6") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } assertExistenceOfBlock(t, testFramework, notarizationMgr, map[string]bool{ @@ -559,7 +576,7 @@ func TestManager_UpdateStateMutationTreeWithConflict(t *testing.T) { testFramework.IssueBlocks("Block7").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block7") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } assertExistenceOfBlock(t, testFramework, notarizationMgr, map[string]bool{ @@ -581,11 +598,12 @@ func TestManager_UpdateStateMutationTreeWithConflict(t *testing.T) { require.NoError(t, err) eventHandlerMock.Expect("EpochCommittable", epoch.Index(1)) + eventHandlerMock.Expect("ManaVectorUpdate", epoch.Index(1)) 
testFramework.CreateBlock("Block8", tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Block7"), tangleold.WithIssuer(nodes["D"].PublicKey()), tangleold.WithECRecord(ecRecord)) testFramework.IssueBlocks("Block8").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block8") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } assertExistenceOfBlock(t, testFramework, notarizationMgr, map[string]bool{ @@ -607,7 +625,8 @@ func TestManager_TransactionInclusionUpdate(t *testing.T) { var weightProvider *tangleold.CManaWeightProvider manaRetrieverMock := func() map[identity.ID]float64 { for _, node := range nodes { - weightProvider.Update(time.Now(), node.ID()) + ei := epoch.IndexFromTime(time.Now()) + weightProvider.Update(ei, node.ID()) } return map[identity.ID]float64{ nodes["A"].ID(): 30, @@ -622,8 +641,9 @@ func TestManager_TransactionInclusionUpdate(t *testing.T) { // Make Current Epoch be epoch 5 genesisTime := time.Now().Add(-epochInterval * 5) + confirmedRetrieverFunc := func() epoch.Index { return 0 } - weightProvider = tangleold.NewCManaWeightProvider(manaRetrieverMock, time.Now) + weightProvider = tangleold.NewCManaWeightProvider(manaRetrieverMock, time.Now, confirmedRetrieverFunc) testFramework, eventHandlerMock, notarizationMgr := setupFramework(t, genesisTime, epochInterval, epochInterval*2, tangleold.ApprovalWeights(weightProvider), tangleold.WithConflictDAGOptions(conflictdag.WithMergeToMaster(false))) issuingTime := genesisTime @@ -638,7 +658,7 @@ func TestManager_TransactionInclusionUpdate(t *testing.T) { testFramework.IssueBlocks("Block1").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block1") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block2, issuing time epoch 1 { @@ -650,7 +670,7 @@ func TestManager_TransactionInclusionUpdate(t *testing.T) { testFramework.IssueBlocks("Block2").WaitUntilAllTasksProcessed() blk := 
testFramework.Block("Block2") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block3 TX1, issuing time epoch 1 { @@ -662,7 +682,7 @@ func TestManager_TransactionInclusionUpdate(t *testing.T) { testFramework.IssueBlocks("Block3").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block3") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block4 TX2, issuing time epoch 1 { @@ -674,7 +694,7 @@ func TestManager_TransactionInclusionUpdate(t *testing.T) { testFramework.IssueBlocks("Block4").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block4") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) // pre-create block 8 testFramework.CreateBlock("Block8", tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Block4"), tangleold.WithIssuer(nodes["B"].PublicKey()), tangleold.WithInputs("C"), tangleold.WithOutput("E", 500), tangleold.WithECRecord(ecRecord)) @@ -692,7 +712,7 @@ func TestManager_TransactionInclusionUpdate(t *testing.T) { testFramework.IssueBlocks("Block5").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block5") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block6, issuing time epoch 2 { @@ -704,7 +724,7 @@ func TestManager_TransactionInclusionUpdate(t *testing.T) { testFramework.IssueBlocks("Block6").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block6") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block7, issuing time epoch 2 { @@ -716,14 +736,14 @@ func TestManager_TransactionInclusionUpdate(t *testing.T) { testFramework.IssueBlocks("Block7").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block7") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block8, issuing time epoch 1, earlier attachment 
of Block6, with same tx { testFramework.IssueBlocks("Block8").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block8") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block9, issuing time epoch 2 { @@ -735,7 +755,7 @@ func TestManager_TransactionInclusionUpdate(t *testing.T) { testFramework.IssueBlocks("Block9").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block9") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block10, issuing time epoch 2 { @@ -747,7 +767,7 @@ func TestManager_TransactionInclusionUpdate(t *testing.T) { testFramework.IssueBlocks("Block10").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block10") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } assertExistenceOfTransaction(t, testFramework, notarizationMgr, map[string]bool{ @@ -776,7 +796,8 @@ func TestManager_DiffUTXOs(t *testing.T) { var weightProvider *tangleold.CManaWeightProvider manaRetrieverMock := func() map[identity.ID]float64 { for _, node := range nodes { - weightProvider.Update(time.Now(), node.ID()) + ei := epoch.IndexFromTime(time.Now()) + weightProvider.Update(ei, node.ID()) } return map[identity.ID]float64{ nodes["A"].ID(): 30, @@ -786,13 +807,14 @@ func TestManager_DiffUTXOs(t *testing.T) { nodes["E"].ID(): 10, } } + confirmedRetrieverFunc := func() epoch.Index { return 0 } epochInterval := 1 * time.Second // Make Current Epoch be epoch 5 genesisTime := time.Now().Add(-epochInterval * 5) - weightProvider = tangleold.NewCManaWeightProvider(manaRetrieverMock, time.Now) + weightProvider = tangleold.NewCManaWeightProvider(manaRetrieverMock, time.Now, confirmedRetrieverFunc) testFramework, eventHandlerMock, notarizationMgr := setupFramework(t, genesisTime, epochInterval, epochInterval*2, tangleold.ApprovalWeights(weightProvider), tangleold.WithConflictDAGOptions(conflictdag.WithMergeToMaster(false))) 
issuingTime := genesisTime @@ -808,7 +830,7 @@ func TestManager_DiffUTXOs(t *testing.T) { testFramework.IssueBlocks("Block1").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block1") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block2, issuing time epoch 1 @@ -822,7 +844,7 @@ func TestManager_DiffUTXOs(t *testing.T) { testFramework.IssueBlocks("Block2").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block2") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } assertEpochDiff(t, testFramework, notarizationMgr, epoch.Index(1), []string{"A"}, []string{"C1", "C1+"}) @@ -840,7 +862,7 @@ func TestManager_DiffUTXOs(t *testing.T) { testFramework.IssueBlocks("Block3").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block3") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block4, issuing time epoch 2 @@ -854,7 +876,7 @@ func TestManager_DiffUTXOs(t *testing.T) { testFramework.IssueBlocks("Block4").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block4") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } assertEpochDiff(t, testFramework, notarizationMgr, epoch.Index(2), []string{"D2"}, []string{"E3"}) @@ -873,7 +895,7 @@ func TestManager_DiffUTXOs(t *testing.T) { testFramework.IssueBlocks("Block5").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block5") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } assertEpochDiff(t, testFramework, notarizationMgr, epoch.Index(1), []string{"A", "B"}, []string{"C1", "C1+", "D2"}) @@ -888,11 +910,12 @@ func TestManager_DiffUTXOs(t *testing.T) { require.Equal(t, epoch.Index(0), ecRecord.EI()) eventHandlerMock.Expect("EpochCommittable", epoch.Index(1)) + eventHandlerMock.Expect("ManaVectorUpdate", epoch.Index(1)) testFramework.CreateBlock("Block6", 
tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Block5"), tangleold.WithIssuer(nodes["E"].PublicKey()), tangleold.WithInputs("G5"), tangleold.WithOutput("H6", 500), tangleold.WithECRecord(ecRecord)) testFramework.IssueBlocks("Block6").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block6") - assert.Equal(t, epoch.Index(0), blk.EI()) + assert.Equal(t, epoch.Index(0), blk.ECRecordEI()) } // Block7, issuing time epoch 3, if we loaded the diff we should just have F4 and H6 as spent and created @@ -907,7 +930,7 @@ func TestManager_DiffUTXOs(t *testing.T) { testFramework.IssueBlocks("Block7").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block7") - assert.Equal(t, epoch.Index(1), blk.EI()) + assert.Equal(t, epoch.Index(1), blk.ECRecordEI()) } // Block8, issuing time epoch 2, reattaches Block6's TX from epoch 3 to epoch 2 @@ -922,7 +945,7 @@ func TestManager_DiffUTXOs(t *testing.T) { testFramework.IssueBlocks("Block8").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block8") - assert.Equal(t, epoch.Index(1), blk.EI()) + assert.Equal(t, epoch.Index(1), blk.ECRecordEI()) } // Block9, issuing time epoch 3, confirms Block8 (reattachment of Block 6) @@ -937,7 +960,7 @@ func TestManager_DiffUTXOs(t *testing.T) { testFramework.IssueBlocks("Block9").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block9") - assert.Equal(t, epoch.Index(1), blk.EI()) + assert.Equal(t, epoch.Index(1), blk.ECRecordEI()) } // Block10, issuing time epoch 3, confirms Block9 and reattachment of Block 6 @@ -952,7 +975,7 @@ func TestManager_DiffUTXOs(t *testing.T) { testFramework.IssueBlocks("Block10").WaitUntilAllTasksProcessed() blk := testFramework.Block("Block10") - assert.Equal(t, epoch.Index(1), blk.EI()) + assert.Equal(t, epoch.Index(1), blk.ECRecordEI()) } assertEpochDiff(t, testFramework, notarizationMgr, epoch.Index(2), []string{"G5", "D2"}, []string{"F4", "H6"}) @@ -961,6 +984,103 @@ func TestManager_DiffUTXOs(t *testing.T) { 
eventHandlerMock.AssertExpectations(t) } +func TestManager_ActivityTree(t *testing.T) { + nodes := make(map[string]*identity.Identity) + for _, node := range []string{"A", "B", "C", "D", "E"} { + nodes[node] = identity.GenerateIdentity() + } + // Make Current Epoch be epoch 5 + epochInterval := 1 * time.Second + genesisTime := time.Now().Add(-epochInterval * 5) + + timeManager := struct{ time time.Time }{time: genesisTime} + timeRetrieverFunc := func() time.Time { return timeManager.time } + + var weightProvider *tangleold.CManaWeightProvider + manaRetrieverMock := func() map[identity.ID]float64 { + return map[identity.ID]float64{ + nodes["A"].ID(): 30, + nodes["B"].ID(): 15, + nodes["C"].ID(): 25, + nodes["D"].ID(): 20, + nodes["E"].ID(): 10, + } + } + //for _, node := range nodes { + // ei := epoch.IndexFromTime(time.Now()) + // weightProvider.Update(ei, node.ID()) + //} + confirmedRetrieverFunc := func() epoch.Index { return 0 } + + weightProvider = tangleold.NewCManaWeightProvider(manaRetrieverMock, timeRetrieverFunc, confirmedRetrieverFunc) + testFramework, _, _ := setupFramework(t, genesisTime, epochInterval, epochInterval*2, tangleold.ApprovalWeights(weightProvider), tangleold.WithConflictDAGOptions(conflictdag.WithMergeToMaster(false))) + + // expected activity records + activeNodesTest := make(map[epoch.Index][]identity.ID) + activeNodesTest[epoch.Index(6)] = []identity.ID{nodes["A"].ID(), nodes["B"].ID()} + activeNodesTest[epoch.Index(7)] = []identity.ID{nodes["A"].ID(), nodes["B"].ID(), nodes["C"].ID()} + activeNodesTest[epoch.Index(10)] = []identity.ID{nodes["C"].ID()} + + issuingTime := genesisTime.Add(epochInterval * 5) + timeManager.time = issuingTime + // Block1, issuing time epoch 1 + { + fmt.Println("block 1 and block 2 in epoch 1") + + ecRecord, _, err := testFramework.LatestCommitment() + require.NoError(t, err) + ei := epoch.IndexFromTime(issuingTime) + + testFramework.CreateBlock("Block1", tangleold.WithIssuingTime(issuingTime), 
tangleold.WithStrongParents("Genesis"), tangleold.WithIssuer(nodes["A"].PublicKey()), tangleold.WithInputs("A"), tangleold.WithOutput("C1", 400), tangleold.WithECRecord(ecRecord)) + testFramework.IssueBlocks("Block1").WaitUntilAllTasksProcessed() + testFramework.CreateBlock("Block2", tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Genesis"), tangleold.WithIssuer(nodes["B"].PublicKey()), tangleold.WithInputs("B"), tangleold.WithOutput("C2", 400), tangleold.WithECRecord(ecRecord)) + testFramework.IssueBlocks("Block2").WaitUntilAllTasksProcessed() + weightProvider.Update(ei, nodes["A"].ID()) + weightProvider.Update(ei, nodes["B"].ID()) + + activeNodes, _ := weightProvider.WeightsOfRelevantVoters() + assert.Equal(t, len(activeNodesTest[ei]), len(activeNodes)) + for _, n := range activeNodesTest[ei] { + assert.Contains(t, activeNodes, n) + } + } + + issuingTime = issuingTime.Add(epochInterval) + timeManager.time = issuingTime + + { + fmt.Println("block 3 in epoch 2") + + ecRecord, _, err := testFramework.LatestCommitment() + require.NoError(t, err) + ei := epoch.IndexFromTime(issuingTime) + + testFramework.CreateBlock("Block3", tangleold.WithIssuingTime(issuingTime), tangleold.WithStrongParents("Genesis"), tangleold.WithIssuer(nodes["C"].PublicKey()), tangleold.WithInputs("C1"), tangleold.WithInputs("C2"), tangleold.WithOutput("E1", 800), tangleold.WithECRecord(ecRecord)) + testFramework.IssueBlocks("Block3").WaitUntilAllTasksProcessed() + weightProvider.Update(epoch.IndexFromTime(issuingTime), nodes["C"].ID()) + + activeNodes, _ := weightProvider.WeightsOfRelevantVoters() + assert.Equal(t, len(activeNodesTest[ei]), len(activeNodes)) + for _, n := range activeNodesTest[ei] { + assert.Contains(t, activeNodes, n) + } + } + + issuingTime = issuingTime.Add(epochInterval * 16) + timeManager.time = issuingTime + + { + ei := epoch.IndexFromTime(issuingTime) + + activeNodes, _ := weightProvider.WeightsOfRelevantVoters() + assert.Equal(t, 
len(activeNodesTest[ei]), len(activeNodes)) + for _, n := range activeNodesTest[ei] { + assert.Contains(t, activeNodes, n) + } + } + +} + func setupFramework(t *testing.T, genesisTime time.Time, epochInterval time.Duration, minCommittable time.Duration, options ...tangleold.Option) (testFramework *tangleold.BlockTestFramework, eventMock *EventMock, m *Manager) { epoch.Duration = int64(epochInterval.Seconds()) @@ -979,7 +1099,7 @@ func setupFramework(t *testing.T, genesisTime time.Time, epochInterval time.Dura // set up notarization manager ecFactory := NewEpochCommitmentFactory(testTangle.Options.Store, testTangle, 0) - m = NewManager(ecFactory, testTangle, MinCommittableEpochAge(minCommittable), BootstrapWindow(minCommittable*2), ManaDelay(2), Log(logger.NewExampleLogger("test"))) + m = NewManager(ecFactory, testTangle, MinCommittableEpochAge(minCommittable), BootstrapWindow(minCommittable*2), Log(logger.NewExampleLogger("test"))) commitmentFunc := func() (ecRecord *epoch.ECRecord, latestConfirmedEpoch epoch.Index, err error) { ecRecord, err = m.GetLatestEC() @@ -1096,8 +1216,8 @@ func loadSnapshot(m *Manager, testFramework *tangleold.BlockTestFramework) { snapshot.Header = header m.LoadOutputsWithMetadata(snapshot.OutputsWithMetadata) - m.LoadEpochDiffs(snapshot.Header, snapshot.EpochDiffs) m.LoadECandEIs(snapshot.Header) + m.LoadActivityLogs(snapshot.EpochActiveNodes) } func registerToTangleEvents(sfg *acceptance.Gadget, testTangle *tangleold.Tangle) { diff --git a/packages/core/notarization/testutils.go b/packages/core/notarization/testutils.go index 2d4f11b685..27eb731a30 100644 --- a/packages/core/notarization/testutils.go +++ b/packages/core/notarization/testutils.go @@ -20,7 +20,7 @@ const ( var ( // TestConflictAcceptanceStateTranslation translates a conflict's AW into a confirmation state. 
- TestConflictAcceptanceStateTranslation acceptance.ConflictThresholdTranslation = func(conflictID utxo.TransactionID, aw float64) confirmation.State { + TestConflictAcceptanceStateTranslation acceptance.ConflictThresholdTranslation = func(_ utxo.TransactionID, aw float64) confirmation.State { if aw >= testingAcceptanceThreshold { return confirmation.Accepted } @@ -107,6 +107,6 @@ func (e *EventMock) EpochCommittable(event *EpochCommittableEvent) { // ManaVectorUpdate is the mocked ManaVectorUpdate event. func (e *EventMock) ManaVectorUpdate(event *ManaVectorUpdateEvent) { - e.Called(event.EI, event.EpochDiffCreated, event.EpochDiffSpent) + e.Called(event.EI) atomic.AddUint64(&e.calledEvents, 1) } diff --git a/packages/core/snapshot/manager.go b/packages/core/snapshot/manager.go new file mode 100644 index 0000000000..cb16528587 --- /dev/null +++ b/packages/core/snapshot/manager.go @@ -0,0 +1,139 @@ +package snapshot + +import ( + "errors" + "sync" + + "github.com/iotaledger/goshimmer/packages/core/epoch" + "github.com/iotaledger/goshimmer/packages/core/ledger" + "github.com/iotaledger/goshimmer/packages/core/notarization" + "github.com/iotaledger/goshimmer/packages/core/tangleold" + "github.com/iotaledger/hive.go/core/generics/shrinkingmap" + "github.com/iotaledger/hive.go/core/types" +) + +// Manager is the snapshot manager. +type Manager struct { + sync.RWMutex + + notarizationMgr *notarization.Manager + seps *shrinkingmap.ShrinkingMap[epoch.Index, map[tangleold.BlockID]types.Empty] + snapshotDepth int +} + +// NewManager creates and returns a new snapshot manager. +func NewManager(nmgr *notarization.Manager, depth int) (new *Manager) { + new = &Manager{ + notarizationMgr: nmgr, + seps: shrinkingmap.New[epoch.Index, map[tangleold.BlockID]types.Empty](), + snapshotDepth: depth, + } + + return +} + +// CreateSnapshot creates a snapshot file from node with a given name. 
+func (m *Manager) CreateSnapshot(snapshotFileName string) (header *ledger.SnapshotHeader, err error) { + // lock the entire solid entry points storage until the snapshot is created. + m.RLock() + defer m.RUnlock() + + fullEpochIndex, ecRecord, err := m.notarizationMgr.StartSnapshot() + defer m.notarizationMgr.EndSnapshot() + + headerProd := func() (header *ledger.SnapshotHeader, err error) { + header = &ledger.SnapshotHeader{ + FullEpochIndex: fullEpochIndex, + DiffEpochIndex: ecRecord.EI(), + LatestECRecord: ecRecord, + } + return header, nil + } + + sepsProd := NewSolidEntryPointsProducer(fullEpochIndex, ecRecord.EI(), m) + outputWithMetadataProd := NewLedgerUTXOStatesProducer(m.notarizationMgr) + epochDiffsProd := NewEpochDiffsProducer(fullEpochIndex, ecRecord.EI(), m.notarizationMgr) + activityProducer := NewActivityLogProducer(m.notarizationMgr, ecRecord.EI()) + + header, err = CreateSnapshot(snapshotFileName, headerProd, sepsProd, outputWithMetadataProd, epochDiffsProd, activityProducer) + + return +} + +// LoadSolidEntryPoints add solid entry points to storage. +func (m *Manager) LoadSolidEntryPoints(seps *SolidEntryPoints) { + if seps == nil { + return + } + + m.Lock() + defer m.Unlock() + + sep := make(map[tangleold.BlockID]types.Empty) + for _, b := range seps.Seps { + sep[b] = types.Void + } + m.seps.Set(seps.EI, sep) +} + +// AdvanceSolidEntryPoints remove seps of old epoch when confirmed epoch advanced. +func (m *Manager) AdvanceSolidEntryPoints(ei epoch.Index) { + m.Lock() + defer m.Unlock() + + // preserve seps of last confirmed epoch until now + m.seps.Delete(ei - epoch.Index(m.snapshotDepth) - 1) +} + +// InsertSolidEntryPoint inserts a solid entry point to the seps map. 
+func (m *Manager) InsertSolidEntryPoint(id tangleold.BlockID) { + m.Lock() + defer m.Unlock() + + sep, ok := m.seps.Get(id.EpochIndex) + if !ok { + sep = make(map[tangleold.BlockID]types.Empty) + } + + sep[id] = types.Void + m.seps.Set(id.EpochIndex, sep) +} + +// RemoveSolidEntryPoint removes a solid entry points from the map. +func (m *Manager) RemoveSolidEntryPoint(b *tangleold.Block) (err error) { + m.Lock() + defer m.Unlock() + + epochSeps, exists := m.seps.Get(b.ID().EpochIndex) + if !exists { + return errors.New("solid entry point of the epoch does not exist") + } + + delete(epochSeps, b.ID()) + + return +} + +// snapshotSolidEntryPoints snapshots seps within given epochs. +func (m *Manager) snapshotSolidEntryPoints(lastConfirmedEpoch, latestCommitableEpoch epoch.Index, prodChan chan *SolidEntryPoints, stopChan chan struct{}) { + go func() { + for i := lastConfirmedEpoch; i <= latestCommitableEpoch; i++ { + seps := make([]tangleold.BlockID, 0) + + epochSeps, _ := m.seps.Get(i) + for blkID := range epochSeps { + seps = append(seps, blkID) + } + + send := &SolidEntryPoints{ + EI: i, + Seps: seps, + } + prodChan <- send + } + + close(stopChan) + }() + + return +} diff --git a/packages/core/snapshot/read.go b/packages/core/snapshot/read.go index cbaf30988c..e662adf1ba 100644 --- a/packages/core/snapshot/read.go +++ b/packages/core/snapshot/read.go @@ -2,58 +2,65 @@ package snapshot import ( "bufio" - "bytes" "context" "encoding/binary" - "fmt" "io" "github.com/cockroachdb/errors" - "github.com/iotaledger/hive.go/core/serix" - "github.com/iotaledger/goshimmer/packages/core/epoch" "github.com/iotaledger/goshimmer/packages/core/ledger" + "github.com/iotaledger/goshimmer/packages/core/tangleold" + "github.com/iotaledger/hive.go/core/serix" ) // streamSnapshotDataFrom consumes a snapshot from the given reader. 
func streamSnapshotDataFrom( reader io.ReadSeeker, headerConsumer HeaderConsumerFunc, + sepsConsumer SolidEntryPointsConsumerFunc, outputConsumer UTXOStatesConsumerFunc, - epochDiffsConsumer EpochDiffsConsumerFunc) error { + epochDiffsConsumer EpochDiffsConsumerFunc, + activityLogConsumer ActivityLogConsumerFunc) error { header, err := readSnapshotHeader(reader) if err != nil { - return err + return errors.Wrap(err, "failed to stream snapshot header from snapshot") } + headerConsumer(header) - scanner := bufio.NewScanner(reader) - scanner.Split(scanDelimiter) - - // read latest ECRecord - ecRecord, err := readECRecord(scanner) - if err != nil { - return err + // read solid entry points + for i := header.FullEpochIndex; i <= header.DiffEpochIndex; i++ { + seps, solidErr := readSolidEntryPoints(reader) + if solidErr != nil { + return errors.Wrap(solidErr, "failed to stream solid entry points from snapshot") + } + sepsConsumer(seps) } - header.LatestECRecord = ecRecord - headerConsumer(header) // read outputWithMetadata for i := 0; uint64(i) < header.OutputWithMetadataCount; { - outputs, err := readOutputWithMetadata(scanner) - if err != nil { - return err + outputs, outErr := readOutputsWithMetadatas(reader) + if outErr != nil { + return errors.Wrap(outErr, "failed to stream output with metadata from snapshot") } i += len(outputs) - outputConsumer(outputs) } - epochDiffs, err := readEpochDiffs(scanner) + // read epochDiffs + for ei := header.FullEpochIndex + 1; ei <= header.DiffEpochIndex; ei++ { + epochDiffs, epochErr := readEpochDiffs(reader) + if epochErr != nil { + return errors.Wrapf(epochErr, "failed to parse epochDiffs from bytes") + } + epochDiffsConsumer(epochDiffs) + } + + activityLog, err := readActivityLog(reader) if err != nil { - return errors.Errorf("failed to parse epochDiffs from bytes: %w", err) + return errors.Wrap(err, "failed to parse activity log from bytes") } - epochDiffsConsumer(header, epochDiffs) + activityLogConsumer(activityLog) return 
nil } @@ -62,68 +69,142 @@ func readSnapshotHeader(reader io.ReadSeeker) (*ledger.SnapshotHeader, error) { header := &ledger.SnapshotHeader{} if err := binary.Read(reader, binary.LittleEndian, &header.OutputWithMetadataCount); err != nil { - return nil, fmt.Errorf("unable to read outputWithMetadata length: %w", err) + return nil, errors.Wrap(err, "unable to read outputWithMetadata length") } var index int64 if err := binary.Read(reader, binary.LittleEndian, &index); err != nil { - return nil, fmt.Errorf("unable to read fullEpochIndex: %w", err) + return nil, errors.Wrap(err, "unable to read fullEpochIndex") } header.FullEpochIndex = epoch.Index(index) if err := binary.Read(reader, binary.LittleEndian, &index); err != nil { - return nil, fmt.Errorf("unable to read diffEpochIndex: %w", err) + return nil, errors.Wrap(err, "unable to read diffEpochIndex") } header.DiffEpochIndex = epoch.Index(index) + var latestECRecordLen int64 + if err := binary.Read(reader, binary.LittleEndian, &latestECRecordLen); err != nil { + return nil, errors.Errorf("unable to read latest ECRecord bytes len: %w", err) + } + + ecRecordBytes := make([]byte, latestECRecordLen) + if err := binary.Read(reader, binary.LittleEndian, ecRecordBytes); err != nil { + return nil, errors.Errorf("unable to read latest ECRecord: %w", err) + } + header.LatestECRecord = &epoch.ECRecord{} + if err := header.LatestECRecord.FromBytes(ecRecordBytes); err != nil { + return nil, err + } + return header, nil } -// readOutputWithMetadata consumes a slice of OutputWithMetadata from the given reader. 
-func readOutputWithMetadata(scanner *bufio.Scanner) (outputMetadatas []*ledger.OutputWithMetadata, err error) { - scanner.Scan() - data := scanner.Bytes() +func readSolidEntryPoints(reader io.ReadSeeker) (seps *SolidEntryPoints, err error) { + seps = &SolidEntryPoints{} + blkIDs := make([]tangleold.BlockID, 0) + + // read seps EI + var index int64 + if err := binary.Read(reader, binary.LittleEndian, &index); err != nil { + return nil, errors.Errorf("unable to read epoch index: %w", err) + } + seps.EI = epoch.Index(index) + + // read numbers of solid entry point + var sepsLen int64 + if err := binary.Read(reader, binary.LittleEndian, &sepsLen); err != nil { + return nil, errors.Errorf("unable to read seps len: %w", err) + } + + for i := 0; i < int(sepsLen); { + var sepsBytesLen int64 + if err := binary.Read(reader, binary.LittleEndian, &sepsBytesLen); err != nil { + return nil, errors.Errorf("unable to read seps bytes len: %w", err) + } + + sepsBytes := make([]byte, sepsBytesLen) + if err := binary.Read(reader, binary.LittleEndian, sepsBytes); err != nil { + return nil, errors.Errorf("unable to read solid entry points: %w", err) + } - if len(data) > 0 { - outputMetadatas = make([]*ledger.OutputWithMetadata, 0) - _, err = serix.DefaultAPI.Decode(context.Background(), data, &outputMetadatas, serix.WithValidation()) + ids := make([]tangleold.BlockID, 0) + _, err = serix.DefaultAPI.Decode(context.Background(), sepsBytes, &ids, serix.WithValidation()) if err != nil { return nil, err } + blkIDs = append(blkIDs, ids...) + i += len(ids) + } - for _, o := range outputMetadatas { - o.SetID(o.M.OutputID) - o.Output().SetID(o.M.OutputID) - } + seps.Seps = blkIDs + + return seps, nil +} + +// readOutputsWithMetadatas consumes less or equal chunkSize of OutputWithMetadatas from the given reader. 
+func readOutputsWithMetadatas(reader io.ReadSeeker) (outputMetadatas []*ledger.OutputWithMetadata, err error) { + var outputsLen int64 + if err := binary.Read(reader, binary.LittleEndian, &outputsLen); err != nil { + return nil, errors.Errorf("unable to read outputsWithMetadata bytes len: %w", err) + } + + outputsBytes := make([]byte, outputsLen) + if err := binary.Read(reader, binary.LittleEndian, outputsBytes); err != nil { + return nil, errors.Errorf("unable to read outputsWithMetadata: %w", err) + } + + outputMetadatas = make([]*ledger.OutputWithMetadata, 0) + _, err = serix.DefaultAPI.Decode(context.Background(), outputsBytes, &outputMetadatas, serix.WithValidation()) + if err != nil { + return nil, err + } + + for _, o := range outputMetadatas { + o.SetID(o.M.OutputID) + o.Output().SetID(o.M.OutputID) } return } -// readEpochDiffs consumes a map of EpochDiff from the given reader. -func readEpochDiffs(scanner *bufio.Scanner) (epochDiffs map[epoch.Index]*ledger.EpochDiff, err error) { - epochDiffs = make(map[epoch.Index]*ledger.EpochDiff) +// readEpochDiffs consumes an EpochDiff of an epoch from the given reader. +func readEpochDiffs(reader io.ReadSeeker) (epochDiffs *ledger.EpochDiff, err error) { + spent := make([]*ledger.OutputWithMetadata, 0) + created := make([]*ledger.OutputWithMetadata, 0) - scanner.Scan() - data := scanner.Bytes() - if len(data) > 0 { - _, err = serix.DefaultAPI.Decode(context.Background(), data, &epochDiffs, serix.WithValidation()) + // read spent + var spentLen int64 + if err := binary.Read(reader, binary.LittleEndian, &spentLen); err != nil { + return nil, errors.Errorf("unable to read epochDiffs spent len: %w", err) + } + + for i := 0; i < int(spentLen); { + s, err := readOutputsWithMetadatas(reader) if err != nil { - return nil, errors.Errorf("failed to parse epochDiffs from bytes: %w", err) + return nil, errors.Errorf("unable to read epochDiffs spent: %w", err) } + spent = append(spent, s...) 
+ i += len(s) + } + + // read created + var createdLen int64 + if err := binary.Read(reader, binary.LittleEndian, &createdLen); err != nil { + return nil, errors.Errorf("unable to read epochDiffs created len: %w", err) + } - for _, epochdiff := range epochDiffs { - for _, spentOutput := range epochdiff.Spent() { - spentOutput.SetID(spentOutput.M.OutputID) - spentOutput.Output().SetID(spentOutput.M.OutputID) - } - for _, createdOutput := range epochdiff.Created() { - createdOutput.SetID(createdOutput.M.OutputID) - createdOutput.Output().SetID(createdOutput.M.OutputID) - } + for i := 0; i < int(createdLen); { + c, err := readOutputsWithMetadatas(reader) + if err != nil { + return nil, errors.Errorf("unable to read epochDiffs created: %w", err) } + created = append(created, c...) + i += len(c) } + epochDiffs = ledger.NewEpochDiff(spent, created) + return } @@ -134,23 +215,39 @@ func readECRecord(scanner *bufio.Scanner) (ecRecord *epoch.ECRecord, err error) ecRecord = &epoch.ECRecord{} err = ecRecord.FromBytes(scanner.Bytes()) if err != nil { - return nil, errors.Errorf("failed to parse epochDiffs from bytes: %w", err) + return nil, errors.Wrap(err, "failed to parse epochDiffs from bytes") } return } -func scanDelimiter(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - if i := bytes.Index(data, delimiter); i >= 0 { - return i + len(delimiter), data[0:i], nil +// readActivityLog consumes the ActivityLog from the given reader. +func readActivityLog(reader io.ReadSeeker) (activityLogs epoch.SnapshotEpochActivity, err error) { + var activityLen int64 + if lenErr := binary.Read(reader, binary.LittleEndian, &activityLen); lenErr != nil { + return nil, errors.Wrap(lenErr, "unable to read activity len") } - // at EOF, return rest of data. 
- if atEOF { - return len(data), data, nil + + activityLogs = epoch.NewSnapshotEpochActivity() + + for i := 0; i < int(activityLen); i++ { + var epochIndex epoch.Index + if eiErr := binary.Read(reader, binary.LittleEndian, &epochIndex); eiErr != nil { + return nil, errors.Errorf("unable to read epoch index: %w", eiErr) + } + var activityBytesLen int64 + if activityLenErr := binary.Read(reader, binary.LittleEndian, &activityBytesLen); activityLenErr != nil { + return nil, errors.Errorf("unable to read activity log length: %w", activityLenErr) + } + activityLogBytes := make([]byte, activityBytesLen) + if alErr := binary.Read(reader, binary.LittleEndian, activityLogBytes); alErr != nil { + return nil, errors.Errorf("unable to read activity log: %w", alErr) + } + activityLog := new(epoch.SnapshotNodeActivity) + activityLog.FromBytes(activityLogBytes) + + activityLogs[epochIndex] = activityLog } - return 0, nil, nil + return } diff --git a/packages/core/snapshot/snapshot.go b/packages/core/snapshot/snapshot.go index 0ba1a242eb..e91d9d32c5 100644 --- a/packages/core/snapshot/snapshot.go +++ b/packages/core/snapshot/snapshot.go @@ -4,10 +4,10 @@ import ( "fmt" "os" - "github.com/iotaledger/hive.go/core/serix" - "github.com/iotaledger/goshimmer/packages/core/epoch" "github.com/iotaledger/goshimmer/packages/core/ledger" + "github.com/iotaledger/goshimmer/packages/core/tangleold" + "github.com/iotaledger/hive.go/core/serix" ) // Snapshot contains the data to be put in a snapshot file. @@ -15,6 +15,12 @@ type Snapshot struct { LedgerSnapshot *ledger.Snapshot } +// SolidEntryPoints contains solid entry points of an epoch. 
+type SolidEntryPoints struct { + EI epoch.Index `serix:"0"` + Seps []tangleold.BlockID `serix:"1,lengthPrefixType=uint32"` +} + func init() { typeSet := new(serix.TypeSettings) ts := typeSet.WithLengthPrefixType(serix.LengthPrefixTypeAsUint32) @@ -24,23 +30,31 @@ func init() { panic(fmt.Errorf("error registering OutputWithMetadata slice type settings: %w", err)) } - err = serix.DefaultAPI.RegisterTypeSettings(map[epoch.Index]*ledger.EpochDiff{}, ts) + err = serix.DefaultAPI.RegisterTypeSettings([]tangleold.BlockID{}, ts) + if err != nil { + panic(fmt.Errorf("error registering block ID slice type settings: %w", err)) + } + + err = serix.DefaultAPI.RegisterTypeSettings(epoch.SnapshotEpochActivity{}, ts) if err != nil { panic(fmt.Errorf("error registering EpochDiff map type settings: %w", err)) } } // CreateSnapshot creates a snapshot file to the given file path. -func CreateSnapshot(filePath string, +func CreateSnapshot( + filePath string, headerProd HeaderProducerFunc, + sepsProd SolidEntryPointsProducerFunc, utxoStatesProd UTXOStatesProducerFunc, - epochDiffsProd EpochDiffProducerFunc) (*ledger.SnapshotHeader, error) { + epochDiffsProd EpochDiffProducerFunc, + activityLogProd ActivityLogProducerFunc) (*ledger.SnapshotHeader, error) { f, err := os.Create(filePath) if err != nil { return nil, fmt.Errorf("fail to create snapshot file: %s", err) } - header, err := streamSnapshotDataTo(f, headerProd, utxoStatesProd, epochDiffsProd) + header, err := streamSnapshotDataTo(f, headerProd, sepsProd, utxoStatesProd, epochDiffsProd, activityLogProd) if err != nil { return nil, err } @@ -54,8 +68,10 @@ func CreateSnapshot(filePath string, // consumer functions. 
func LoadSnapshot(filePath string, headerConsumer HeaderConsumerFunc, + sepsConsumer SolidEntryPointsConsumerFunc, outputWithMetadataConsumer UTXOStatesConsumerFunc, - epochDiffsConsumer EpochDiffsConsumerFunc) (err error) { + epochDiffsConsumer EpochDiffsConsumerFunc, + activityLogConsumer ActivityLogConsumerFunc) (err error) { f, err := os.Open(filePath) defer f.Close() @@ -63,7 +79,7 @@ func LoadSnapshot(filePath string, return fmt.Errorf("fail to open the snapshot file") } - err = streamSnapshotDataFrom(f, headerConsumer, outputWithMetadataConsumer, epochDiffsConsumer) + err = streamSnapshotDataFrom(f, headerConsumer, sepsConsumer, outputWithMetadataConsumer, epochDiffsConsumer, activityLogConsumer) return } @@ -75,13 +91,25 @@ type UTXOStatesProducerFunc func() (outputWithMetadata *ledger.OutputWithMetadat type UTXOStatesConsumerFunc func(outputWithMetadatas []*ledger.OutputWithMetadata) // EpochDiffProducerFunc is the type of function that produces EpochDiff when taking a snapshot. -type EpochDiffProducerFunc func() (epochDiffs map[epoch.Index]*ledger.EpochDiff, err error) +type EpochDiffProducerFunc func() (epochDiffs *ledger.EpochDiff) // EpochDiffsConsumerFunc is the type of function that consumes EpochDiff when loading a snapshot. -type EpochDiffsConsumerFunc func(header *ledger.SnapshotHeader, epochDiffs map[epoch.Index]*ledger.EpochDiff) +type EpochDiffsConsumerFunc func(epochDiffs *ledger.EpochDiff) + +// ActivityLogProducerFunc is the type of function that produces ActivityLog when taking a snapshot. +type ActivityLogProducerFunc func() (activityLogs epoch.SnapshotEpochActivity) + +// ActivityLogConsumerFunc is the type of function that consumes Activity logs when loading a snapshot. +type ActivityLogConsumerFunc func(activityLogs epoch.SnapshotEpochActivity) // HeaderProducerFunc is the type of function that produces snapshot header when taking a snapshot. 
type HeaderProducerFunc func() (header *ledger.SnapshotHeader, err error) // HeaderConsumerFunc is the type of function that consumes snapshot header when loading a snapshot. type HeaderConsumerFunc func(header *ledger.SnapshotHeader) + +// SolidEntryPointsProducerFunc is the type of function that produces solid entry points when taking a snapshot. +type SolidEntryPointsProducerFunc func() (seps *SolidEntryPoints) + +// SolidEntryPointsConsumerFunc is the type of function that consumes solid entry points when loading a snapshot. +type SolidEntryPointsConsumerFunc func(seps *SolidEntryPoints) diff --git a/packages/core/snapshot/snapshot_test.go b/packages/core/snapshot/snapshot_test.go index e5afa4e28f..2e18d4c55c 100644 --- a/packages/core/snapshot/snapshot_test.go +++ b/packages/core/snapshot/snapshot_test.go @@ -1,6 +1,7 @@ package snapshot import ( + "math/rand" "os" "testing" "time" @@ -15,6 +16,7 @@ import ( "github.com/iotaledger/goshimmer/packages/core/ledger" "github.com/iotaledger/goshimmer/packages/core/ledger/utxo" "github.com/iotaledger/goshimmer/packages/core/ledger/vm/devnetvm" + "github.com/iotaledger/goshimmer/packages/core/tangleold" ) const ( @@ -30,34 +32,102 @@ var nodesToPledge = []string{ var ( outputsWithMetadata = make([]*ledger.OutputWithMetadata, 0) - epochDiffs = make(map[epoch.Index]*ledger.EpochDiff) + activityLog = epoch.NewSnapshotEpochActivity() + epochDiffs = make([]*ledger.EpochDiff, 0) manaDistribution = createManaDistribution(cfgPledgeTokenAmount) + solidEntryPoints = make([]*SolidEntryPoints, 0) ) func Test_CreateAndReadSnapshot(t *testing.T) { header := createSnapshot(t) - rheader, rstates, repochDiffs := readSnapshot(t) + rheader, rseps, rstates, repochDiffs, ractivity := readSnapshot(t) compareSnapshotHeader(t, header, rheader) compareOutputWithMetadataSlice(t, outputsWithMetadata, rstates) compareEpochDiffs(t, epochDiffs, repochDiffs) + compareSolidEntryPoints(t, solidEntryPoints, rseps) + compareActivityLogs(t, activityLog, 
ractivity) err := os.Remove(snapshotFileName) require.NoError(t, err) } +func Test_CreateAndReadEmptySnapshot(t *testing.T) { + // clear all data + outputsWithMetadata = make([]*ledger.OutputWithMetadata, 0) + epochDiffs = make([]*ledger.EpochDiff, 0) + manaDistribution = createManaDistribution(cfgPledgeTokenAmount) + solidEntryPoints = make([]*SolidEntryPoints, 0) + + header := createEmptySnapshot(t) + + rheader, rseps, rstates, repochDiffs, ractivity := readSnapshot(t) + compareSnapshotHeader(t, header, rheader) + compareOutputWithMetadataSlice(t, outputsWithMetadata, rstates) + compareEpochDiffs(t, epochDiffs, repochDiffs) + compareActivityLogs(t, activityLog, ractivity) + compareSolidEntryPoints(t, solidEntryPoints, rseps) + + err := os.Remove(snapshotFileName) + require.NoError(t, err) +} + +func createEmptySnapshot(t *testing.T) (header *ledger.SnapshotHeader) { + fullEpochIndex := epoch.Index(0) + diffEpochIndex := epoch.Index(0) + + headerProd := func() (header *ledger.SnapshotHeader, err error) { + ecRecord := epoch.NewECRecord(diffEpochIndex) + ecRecord.SetECR(epoch.MerkleRoot{}) + ecRecord.SetPrevEC(epoch.MerkleRoot{}) + + header = &ledger.SnapshotHeader{ + FullEpochIndex: fullEpochIndex, + DiffEpochIndex: diffEpochIndex, + LatestECRecord: ecRecord, + } + + return + } + + // prepare outputsWithMetadata + utxoStatesProd := func() *ledger.OutputWithMetadata { + return nil + } + + epochDiffsProd := func() (diffs *ledger.EpochDiff) { + outputs := make([]*ledger.OutputWithMetadata, 0) + diffs = ledger.NewEpochDiff(outputs, outputs) + return + } + + seps := &SolidEntryPoints{EI: 0, Seps: make([]tangleold.BlockID, 0)} + solidEntryPoints = append(solidEntryPoints, seps) + sepsProd := func() (s *SolidEntryPoints) { + return seps + } + activityLogProd := func() (n epoch.SnapshotEpochActivity) { + return activityLog + } + + header, err := CreateSnapshot(snapshotFileName, headerProd, sepsProd, utxoStatesProd, epochDiffsProd, activityLogProd) + require.NoError(t, 
err) + + return header +} + func createSnapshot(t *testing.T) (header *ledger.SnapshotHeader) { - fullEpochIndex := 1 - diffEpochIndex := 3 + fullEpochIndex := epoch.Index(1) + diffEpochIndex := epoch.Index(3) headerProd := func() (header *ledger.SnapshotHeader, err error) { - ecRecord := epoch.NewECRecord(epoch.Index(diffEpochIndex)) + ecRecord := epoch.NewECRecord(diffEpochIndex) ecRecord.SetECR(epoch.MerkleRoot{}) ecRecord.SetPrevEC(epoch.MerkleRoot{}) header = &ledger.SnapshotHeader{ - FullEpochIndex: epoch.Index(fullEpochIndex), - DiffEpochIndex: epoch.Index(diffEpochIndex), + FullEpochIndex: fullEpochIndex, + DiffEpochIndex: diffEpochIndex, LatestECRecord: ecRecord, } @@ -65,7 +135,7 @@ func createSnapshot(t *testing.T) (header *ledger.SnapshotHeader) { } // prepare outputsWithMetadata - createsOutputsWithMetadatas(t, 110) + createsOutputsWithMetadatas(110) i := 0 utxoStatesProd := func() *ledger.OutputWithMetadata { if i == len(outputsWithMetadata) { @@ -77,40 +147,97 @@ func createSnapshot(t *testing.T) (header *ledger.SnapshotHeader) { return o } - epochDiffsProd := func() (diffs map[epoch.Index]*ledger.EpochDiff, err error) { - l, size := 0, 10 + // prepare epoch diffs + createsEpochDiffs(fullEpochIndex, diffEpochIndex) + k := 0 + epochDiffsProd := func() (diffs *ledger.EpochDiff) { + if i == len(epochDiffs) { + return nil + } + + d := epochDiffs[k] + k++ + return d + } + + solidEntryPoints = createSolidEntryPoints(t, fullEpochIndex, diffEpochIndex) + j := 0 + sepsProd := func() (s *SolidEntryPoints) { + if j == len(solidEntryPoints) { + return nil + } + s = solidEntryPoints[j] + j++ + return s + } - for i := fullEpochIndex + 1; i <= diffEpochIndex; i++ { - spent, created := make([]*ledger.OutputWithMetadata, 0), make([]*ledger.OutputWithMetadata, 0) - spent = append(spent, outputsWithMetadata[l*size:(l+1)*size]...) - created = append(created, outputsWithMetadata[(l+1)*size:(l+2)*size]...) 
+ activityLogProd := func() epoch.SnapshotEpochActivity { + for ei := fullEpochIndex - 1; ei <= diffEpochIndex; ei++ { + activityLog[epoch.Index(ei)] = epoch.NewSnapshotNodeActivity() - epochDiffs[epoch.Index(i)] = ledger.NewEpochDiff(spent, created) - l += 2 + for _, str := range nodesToPledge { + nodeID, decodeErr := identity.DecodeIDBase58(str) + require.NoError(t, decodeErr) + + for r := 0; r < rand.Intn(10); r++ { + activityLog[epoch.Index(ei)].SetNodeActivity(nodeID, 1) + } + } } - return epochDiffs, nil + return activityLog } - header, err := CreateSnapshot(snapshotFileName, headerProd, utxoStatesProd, epochDiffsProd) + header, err := CreateSnapshot(snapshotFileName, headerProd, sepsProd, utxoStatesProd, epochDiffsProd, activityLogProd) + require.NoError(t, err) return header } -func readSnapshot(t *testing.T) (header *ledger.SnapshotHeader, states []*ledger.OutputWithMetadata, epochDiffs map[epoch.Index]*ledger.EpochDiff) { +func readSnapshot(t *testing.T) (header *ledger.SnapshotHeader, seps []*SolidEntryPoints, states []*ledger.OutputWithMetadata, epochDiffs []*ledger.EpochDiff, activity epoch.SnapshotEpochActivity) { outputWithMetadataConsumer := func(outputWithMetadatas []*ledger.OutputWithMetadata) { states = append(states, outputWithMetadatas...) 
} - epochDiffsConsumer := func(_ *ledger.SnapshotHeader, diffs map[epoch.Index]*ledger.EpochDiff) { - epochDiffs = diffs + epochDiffConsumer := func(diffs *ledger.EpochDiff) { + epochDiffs = append(epochDiffs, diffs) } headerConsumer := func(h *ledger.SnapshotHeader) { header = h } + activityLogConsumer := func(ea epoch.SnapshotEpochActivity) { + activity = ea + } + sepsConsumer := func(s *SolidEntryPoints) { + seps = append(seps, s) + } - err := LoadSnapshot(snapshotFileName, headerConsumer, outputWithMetadataConsumer, epochDiffsConsumer) + err := LoadSnapshot(snapshotFileName, headerConsumer, sepsConsumer, outputWithMetadataConsumer, epochDiffConsumer, activityLogConsumer) require.NoError(t, err) + return +} + +func createsEpochDiffs(fullEpochIndex, diffEpochIndex epoch.Index) { + l, size := 0, 10 + for i := fullEpochIndex + 1; i <= diffEpochIndex; i++ { + spent, created := make([]*ledger.OutputWithMetadata, 0), make([]*ledger.OutputWithMetadata, 0) + spent = append(spent, outputsWithMetadata[l*size:(l+1)*size]...) + created = append(created, outputsWithMetadata[(l+1)*size:(l+2)*size]...) 
+ + epochDiffs = append(epochDiffs, ledger.NewEpochDiff(spent, created)) + l += 2 + } +} +func createSolidEntryPoints(t *testing.T, fullEpochIndex, diffEpochIndex epoch.Index) (seps []*SolidEntryPoints) { + for i := fullEpochIndex; i <= diffEpochIndex; i++ { + sep := &SolidEntryPoints{EI: i, Seps: make([]tangleold.BlockID, 0)} + for j := 0; j < 101; j++ { + var b tangleold.BlockID + require.NoError(t, b.FromRandomness(i)) + sep.Seps = append(sep.Seps, b) + } + seps = append(seps, sep) + } return } @@ -130,7 +257,7 @@ func createManaDistribution(totalTokensToPledge uint64) (manaDistribution map[id var outputCounter uint16 = 1 -func createsOutputsWithMetadatas(t *testing.T, total int) { +func createsOutputsWithMetadatas(total int) { now := time.Now() for i := 0; i < total; { for nodeID, value := range manaDistribution { @@ -173,6 +300,18 @@ func compareSnapshotHeader(t *testing.T, created, unmarshal *ledger.SnapshotHead assert.ElementsMatch(t, oLatestECRecordBytes, nLatestECRecordBytes) } +func compareSolidEntryPoints(t *testing.T, created, unmarshal []*SolidEntryPoints) { + assert.Equal(t, len(created), len(unmarshal)) + for i := 0; i < len(created); i++ { + assert.Equal(t, created[i].EI, unmarshal[i].EI) + for j := 0; j < len(created[i].Seps); j++ { + ob := created[i].Seps[j].Bytes() + rb := unmarshal[i].Seps[j].Bytes() + assert.ElementsMatch(t, ob, rb) + } + } +} + func compareOutputWithMetadataSlice(t *testing.T, created, unmarshal []*ledger.OutputWithMetadata) { assert.Equal(t, len(created), len(unmarshal)) for i := 0; i < len(created); i++ { @@ -184,13 +323,30 @@ func compareOutputWithMetadataSlice(t *testing.T, created, unmarshal []*ledger.O } } -func compareEpochDiffs(t *testing.T, created, unmarshal map[epoch.Index]*ledger.EpochDiff) { +func compareEpochDiffs(t *testing.T, created, unmarshal []*ledger.EpochDiff) { assert.Equal(t, len(created), len(unmarshal)) - for ei, diffs := range created { - uDiffs, ok := unmarshal[ei] - require.True(t, ok) + for i, 
diffs := range created { + uDiffs := unmarshal[i] compareOutputWithMetadataSlice(t, diffs.Spent(), uDiffs.Spent()) compareOutputWithMetadataSlice(t, diffs.Created(), uDiffs.Created()) } } + +func compareActivityLogs(t *testing.T, created, unmarshal epoch.SnapshotEpochActivity) { + assert.Equal(t, len(created), len(unmarshal)) + for ei, al := range created { + uLog, ok := unmarshal[ei] + require.True(t, ok) + compareActivityLog(t, al, uLog) + } +} + +func compareActivityLog(t *testing.T, created, unmarshal *epoch.SnapshotNodeActivity) { + require.Equal(t, len(created.NodesLog()), len(unmarshal.NodesLog())) + for nodeID, acceptedCount := range created.NodesLog() { + same := unmarshal.NodeActivity(nodeID) == acceptedCount + require.True(t, same) + + } +} diff --git a/packages/core/snapshot/write.go b/packages/core/snapshot/write.go index ffa0a16bd8..7e63d8a5f0 100644 --- a/packages/core/snapshot/write.go +++ b/packages/core/snapshot/write.go @@ -6,6 +6,7 @@ import ( "fmt" "io" + "github.com/cockroachdb/errors" "github.com/iotaledger/hive.go/core/serix" "github.com/iotaledger/goshimmer/packages/core/epoch" @@ -13,36 +14,21 @@ import ( "github.com/iotaledger/goshimmer/packages/core/notarization" ) -const utxoStatesChunkSize = 100 - -var delimiter = []byte{';', ';', ';'} +const chunkSize = 100 // streamSnapshotDataTo writes snapshot to a given writer. 
func streamSnapshotDataTo( writeSeeker io.WriteSeeker, headerProd HeaderProducerFunc, + sepsProd SolidEntryPointsProducerFunc, outputProd UTXOStatesProducerFunc, - epochDiffsProd EpochDiffProducerFunc) (*ledger.SnapshotHeader, error) { + epochDiffsProd EpochDiffProducerFunc, + activityLogProd ActivityLogProducerFunc) (*ledger.SnapshotHeader, error) { writeFunc := func(name string, value any) error { return writeFunc(writeSeeker, name, value) } - writeOutputWithMetadatasFunc := func(chunks []*ledger.OutputWithMetadata) error { - if len(chunks) == 0 { - return nil - } - - data, err := serix.DefaultAPI.Encode(context.Background(), chunks, serix.WithValidation()) - if err != nil { - return err - } - if err := writeFunc("outputs", append(data, delimiter...)); err != nil { - return err - } - return nil - } - header, err := headerProd() if err != nil { return nil, err @@ -50,7 +36,15 @@ func streamSnapshotDataTo( err = writeSnapshotHeader(writeSeeker, header) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to write snapshot header to snapshot") + } + + // write solid entry points + for i := header.FullEpochIndex; i <= header.DiffEpochIndex; i++ { + seps := sepsProd() + if err := writeSolidEntryPoints(writeSeeker, seps); err != nil { + return nil, errors.Wrap(err, "failed to write solid entry points to snapshot") + } } // write outputWithMetadata @@ -61,20 +55,19 @@ func streamSnapshotDataTo( output := outputProd() if output == nil { // write rests of outputWithMetadatas - err = writeOutputWithMetadatasFunc(chunksOutputWithMetadata) + err = writeOutputsWithMetadatas(writeSeeker, chunksOutputWithMetadata) if err != nil { - return nil, err + return nil, errors.Wrapf(err, "failed to write outputs metadata to snapshot") } break } - outputWithMetadataCounter++ outputChunkCounter++ chunksOutputWithMetadata = append(chunksOutputWithMetadata, output) - // put a delimeter every utxoStatesChunkSize outputs - if outputChunkCounter == utxoStatesChunkSize { - 
err = writeOutputWithMetadatasFunc(chunksOutputWithMetadata) + // put a delimiter every chunkSize outputs + if outputChunkCounter == chunkSize { + err = writeOutputsWithMetadatas(writeSeeker, chunksOutputWithMetadata) if err != nil { return nil, err } @@ -84,24 +77,24 @@ func streamSnapshotDataTo( } // write epochDiffs - epochDiffs, err := epochDiffsProd() - if err != nil { - return nil, err + for i := header.FullEpochIndex + 1; i <= header.DiffEpochIndex; i++ { + epochDiffs := epochDiffsProd() + if epochErr := writeEpochDiffs(writeSeeker, epochDiffs); epochErr != nil { + return nil, errors.Wrap(epochErr, "failed to write epochDiffs to snapshot") + } } - bytes, err := serix.DefaultAPI.Encode(context.Background(), epochDiffs, serix.WithValidation()) - if err != nil { - return nil, err - } - if err := writeFunc(fmt.Sprintf("diffEpoch"), append(bytes, delimiter...)); err != nil { - return nil, err + // write active nodes + activeNodes := activityLogProd() + if actErr := writeActivityLog(writeSeeker, activeNodes); actErr != nil { + return nil, errors.Wrap(actErr, "failed to write activity log to snapshot") } // seek back to the file position of the outputWithMetadata counter if _, err := writeSeeker.Seek(0, io.SeekStart); err != nil { - return nil, fmt.Errorf("unable to seek to LS counter placeholders: %w", err) + return nil, errors.Errorf("unable to seek to LS counter placeholders: %w", err) } - if err := writeFunc(fmt.Sprintf("outputWithMetadata counter %d", outputWithMetadataCounter), outputWithMetadataCounter); err != nil { + if err = writeFunc(fmt.Sprintf("outputWithMetadata counter %d", outputWithMetadataCounter), outputWithMetadataCounter); err != nil { return nil, err } header.OutputWithMetadataCount = outputWithMetadataCounter @@ -109,43 +102,223 @@ func streamSnapshotDataTo( return header, nil } +// NewSolidEntryPointsProducer returns a SolidEntryPointsProducerFunc that provide solid entry points from the snapshot manager. 
+func NewSolidEntryPointsProducer(fullEpochIndex, latestCommitableEpoch epoch.Index, smgr *Manager) SolidEntryPointsProducerFunc { + prodChan := make(chan *SolidEntryPoints) + stopChan := make(chan struct{}) + smgr.snapshotSolidEntryPoints(fullEpochIndex, latestCommitableEpoch, prodChan, stopChan) + + return func() *SolidEntryPoints { + select { + case obj := <-prodChan: + return obj + case <-stopChan: + close(prodChan) + return nil + } + } +} + // NewLedgerUTXOStatesProducer returns a OutputWithMetadataProducerFunc that provide OutputWithMetadatas from the ledger. -func NewLedgerUTXOStatesProducer(lastConfirmedEpoch epoch.Index, nmgr *notarization.Manager) UTXOStatesProducerFunc { +func NewLedgerUTXOStatesProducer(nmgr *notarization.Manager) UTXOStatesProducerFunc { prodChan := make(chan *ledger.OutputWithMetadata) - nmgr.SnapshotLedgerState(lastConfirmedEpoch, prodChan) + stopChan := make(chan struct{}) + nmgr.SnapshotLedgerState(prodChan, stopChan) return func() *ledger.OutputWithMetadata { - obj, ok := <-prodChan - if !ok { + select { + case obj := <-prodChan: + return obj + case <-stopChan: + close(prodChan) return nil } - return obj } } // NewEpochDiffsProducer returns a OutputWithMetadataProducerFunc that provide OutputWithMetadatas from the ledger. 
-func NewEpochDiffsProducer(lastConfirmedEpoch, latestCommitableEpoch epoch.Index, nmgr *notarization.Manager) EpochDiffProducerFunc { - epochDiffs, err := nmgr.SnapshotEpochDiffs(lastConfirmedEpoch, latestCommitableEpoch) +func NewEpochDiffsProducer(fullEpochIndex, latestCommitableEpoch epoch.Index, nmgr *notarization.Manager) EpochDiffProducerFunc { + prodChan := make(chan *ledger.EpochDiff) + stopChan := make(chan struct{}) + nmgr.SnapshotEpochDiffs(fullEpochIndex, latestCommitableEpoch, prodChan, stopChan) + + return func() *ledger.EpochDiff { + select { + case obj := <-prodChan: + return obj + case <-stopChan: + close(prodChan) + return nil + } + } +} - return func() (map[epoch.Index]*ledger.EpochDiff, error) { - return epochDiffs, err +func writeEpochDiffs(writeSeeker io.WriteSeeker, diffs *ledger.EpochDiff) error { + writeFuncWrap := func(name string, value any) error { + return writeFunc(writeSeeker, name, value) + } + + spentLen := len(diffs.Spent()) + if err := writeFuncWrap("epochDiffs spent Len", int64(spentLen)); err != nil { + return err + } + + s := diffs.Spent() + var end int + for i := 0; i < spentLen; { + if i+chunkSize > spentLen { + end = spentLen + } else { + end = i + chunkSize + } + if err := writeOutputsWithMetadatas(writeSeeker, s[i:end]); err != nil { + return errors.Wrap(err, "unable to write output with metadata to snapshot") + } + i = end + } + + createdLen := len(diffs.Created()) + if err := writeFuncWrap("epochDiffs created Len", int64(createdLen)); err != nil { + return err } + c := diffs.Created() + for i := 0; i < createdLen; { + if i+chunkSize > createdLen { + end = createdLen + } else { + end = i + chunkSize + } + if err := writeOutputsWithMetadatas(writeSeeker, c[i:end]); err != nil { + return errors.Wrap(err, "unable to write output with metadata to snapshot") + } + i = end + } + + return nil +} + +func writeActivityLog(writeSeeker io.WriteSeeker, activityLog epoch.SnapshotEpochActivity) error { + writeFuncWrap := func(name 
string, value any) error { + return writeFunc(writeSeeker, name, value) + } + // write activityLog length + activityLen := len(activityLog) + if err := writeFuncWrap("activity log len", int64(activityLen)); err != nil { + return err + } + + for ei, al := range activityLog { + // write epoch index + if err := writeFuncWrap("epoch index", ei); err != nil { + return err + } + // write activity log + alBytes, err := al.Bytes() + if err != nil { + return err + } + + if err := writeFuncWrap("activity log bytes len", int64(len(alBytes))); err != nil { + return err + } + + if err := writeFuncWrap("activity log", alBytes); err != nil { + return err + } + } + return nil +} + +func writeSolidEntryPoints(writeSeeker io.WriteSeeker, seps *SolidEntryPoints) error { + writeFuncWrap := func(name string, value any) error { + return writeFunc(writeSeeker, name, value) + } + + // write EI + if err := writeFuncWrap("solid entry points epoch", seps.EI); err != nil { + return err + } + + // write number of solid entry points + sepsLen := len(seps.Seps) + if err := writeFuncWrap("solid entry points Len", int64(sepsLen)); err != nil { + return err + } + + // write solid entry points in chunks + s := seps.Seps + var end int + for i := 0; i < sepsLen; { + if i+chunkSize > sepsLen { + end = sepsLen + } else { + end = i + chunkSize + } + + data, err := serix.DefaultAPI.Encode(context.Background(), s[i:end], serix.WithValidation()) + if err != nil { + return err + } + + if err := writeFuncWrap("sepsBytesLen", int64(len(data))); err != nil { + return err + } + if err := writeFuncWrap("seps", data); err != nil { + return err + } + + i = end + } + return nil +} + +// NewActivityLogProducer returns an ActivityLogProducerFunc that provides activity log from weightProvider and notarization manager. 
+func NewActivityLogProducer(notarizationMgr *notarization.Manager, epochDiffIndex epoch.Index) ActivityLogProducerFunc { + activityLog, err := notarizationMgr.SnapshotEpochActivity(epochDiffIndex) + if err != nil { + panic(err) + } + return func() (activityLogs epoch.SnapshotEpochActivity) { + return activityLog + } +} + +func writeOutputsWithMetadatas(writeSeeker io.WriteSeeker, outputsChunks []*ledger.OutputWithMetadata) error { + if len(outputsChunks) == 0 { + return nil + } + + writeFuncWrap := func(name string, value any) error { + return writeFunc(writeSeeker, name, value) + } + + data, err := serix.DefaultAPI.Encode(context.Background(), outputsChunks, serix.WithValidation()) + if err != nil { + return err + } + if err := writeFuncWrap("outputsBytesLen", int64(len(data))); err != nil { + return err + } + if err := writeFuncWrap("outputs", data); err != nil { + return err + } + return nil } func writeSnapshotHeader(writeSeeker io.WriteSeeker, header *ledger.SnapshotHeader) error { - writeFunc := func(name string, value any) error { + writeFuncWrap := func(name string, value any) error { return writeFunc(writeSeeker, name, value) } - if err := writeFunc(fmt.Sprintf("outputWithMetadata counter %d", header.OutputWithMetadataCount), header.OutputWithMetadataCount); err != nil { + if err := writeFuncWrap(fmt.Sprintf("outputWithMetadata counter %d", header.OutputWithMetadataCount), header.OutputWithMetadataCount); err != nil { return err } - if err := writeFunc(fmt.Sprintf("fullEpochIndex %d", header.FullEpochIndex), header.FullEpochIndex); err != nil { + if err := writeFuncWrap(fmt.Sprintf("fullEpochIndex %d", header.FullEpochIndex), header.FullEpochIndex); err != nil { return err } - if err := writeFunc(fmt.Sprintf("diffEpochIndex %d", header.DiffEpochIndex), header.DiffEpochIndex); err != nil { + if err := writeFuncWrap(fmt.Sprintf("diffEpochIndex %d", header.DiffEpochIndex), header.DiffEpochIndex); err != nil { return err } @@ -154,8 +327,12 @@ func 
writeSnapshotHeader(writeSeeker, header *ledger.SnapshotHead
 		return err
 	}
 
-	if err := writeFunc("latestECRecord", append(data, delimiter...)); err != nil {
-		return err
+	if latestLenErr := writeFuncWrap("latestECRecordBytesLen", int64(len(data))); latestLenErr != nil {
+		return latestLenErr
+	}
+
+	if latestErr := writeFuncWrap("latestECRecord", data); latestErr != nil {
+		return latestErr
 	}
 
 	return nil
@@ -164,11 +341,11 @@ func writeSnapshotHeader(writeSeeker io.WriteSeeker, header *ledger.SnapshotHead
 func writeFunc(writeSeeker io.WriteSeeker, variableName string, value any) error {
 	length := binary.Size(value)
 	if length == -1 {
-		return fmt.Errorf("unable to determine length of %s", variableName)
+		return errors.Errorf("unable to determine length of %s", variableName)
 	}
 
 	if err := binary.Write(writeSeeker, binary.LittleEndian, value); err != nil {
-		return fmt.Errorf("unable to write LS %s: %w", variableName, err)
+		return errors.Errorf("unable to write LS %s: %w", variableName, err)
 	}
 
 	return nil
diff --git a/packages/core/tangleold/approvalweightmanager.models.go b/packages/core/tangleold/approvalweightmanager.models.go
index f70d2053f1..23ce3e0632 100644
--- a/packages/core/tangleold/approvalweightmanager.models.go
+++ b/packages/core/tangleold/approvalweightmanager.models.go
@@ -122,9 +122,9 @@ func (v *Voters) Intersect(other *Voters) (intersection *Voters) {
 
 // String returns a human-readable version of the Voters.
func (v *Voters) String() string { - structBuilder := stringify.StructBuilder("Voters") + structBuilder := stringify.NewStructBuilder("Voters") v.Set.ForEach(func(voter Voter) { - structBuilder.AddField(stringify.StructField(voter.String(), "true")) + structBuilder.AddField(stringify.NewStructField(voter.String(), "true")) }) return structBuilder.String() diff --git a/packages/core/tangleold/approvalweightmanager_test.go b/packages/core/tangleold/approvalweightmanager_test.go index 6f51308a8b..ccb9b3adce 100644 --- a/packages/core/tangleold/approvalweightmanager_test.go +++ b/packages/core/tangleold/approvalweightmanager_test.go @@ -3,6 +3,7 @@ package tangleold import ( "fmt" + "github.com/iotaledger/goshimmer/packages/core/epoch" "testing" "time" @@ -28,13 +29,15 @@ func BenchmarkApprovalWeightManager_ProcessBlock_Conflicts(b *testing.B) { var weightProvider *CManaWeightProvider manaRetrieverMock := func() map[identity.ID]float64 { m := make(map[identity.ID]float64) + ei := epoch.IndexFromTime(time.Now()) for _, s := range voters { - weightProvider.Update(time.Now(), s.ID()) + weightProvider.Update(ei, s.ID()) m[s.ID()] = 100 } return m } - weightProvider = NewCManaWeightProvider(manaRetrieverMock, time.Now) + confirmedRetrieverFunc := func() epoch.Index { return 0 } + weightProvider = NewCManaWeightProvider(manaRetrieverMock, time.Now, confirmedRetrieverFunc) tangle := NewTestTangle(ApprovalWeights(weightProvider)) defer tangle.Shutdown() @@ -101,12 +104,15 @@ func TestApprovalWeightManager_updateConflictVoters(t *testing.T) { var weightProvider *CManaWeightProvider manaRetrieverMock := func() map[identity.ID]float64 { nodeID := identity.NewID(keyPair.PublicKey) - weightProvider.Update(time.Now(), nodeID) + ei := epoch.IndexFromTime(time.Now()) + + weightProvider.Update(ei, nodeID) return map[identity.ID]float64{ nodeID: 100, } } - weightProvider = NewCManaWeightProvider(manaRetrieverMock, time.Now) + confirmedRetrieverFunc := func() epoch.Index { return 0 } + 
weightProvider = NewCManaWeightProvider(manaRetrieverMock, time.Now, confirmedRetrieverFunc) tangle := NewTestTangle(ApprovalWeights(weightProvider), WithConflictDAGOptions(conflictdag.WithMergeToMaster(false))) defer tangle.Shutdown() @@ -134,20 +140,20 @@ func TestApprovalWeightManager_updateConflictVoters(t *testing.T) { "Conflict 4.2": set.NewAdvancedSet(randomConflictID()), } - createConflict(t, tangle, "Conflict 1", conflictIDs, set.NewAdvancedSet[utxo.TransactionID](), resourceIDs["Conflict 1"]) - createConflict(t, tangle, "Conflict 2", conflictIDs, set.NewAdvancedSet[utxo.TransactionID](), resourceIDs["Conflict 1"]) - createConflict(t, tangle, "Conflict 3", conflictIDs, set.NewAdvancedSet[utxo.TransactionID](), resourceIDs["Conflict 2"]) - createConflict(t, tangle, "Conflict 4", conflictIDs, set.NewAdvancedSet[utxo.TransactionID](), resourceIDs["Conflict 2"]) + createConflict(tangle, "Conflict 1", conflictIDs, set.NewAdvancedSet[utxo.TransactionID](), resourceIDs["Conflict 1"]) + createConflict(tangle, "Conflict 2", conflictIDs, set.NewAdvancedSet[utxo.TransactionID](), resourceIDs["Conflict 1"]) + createConflict(tangle, "Conflict 3", conflictIDs, set.NewAdvancedSet[utxo.TransactionID](), resourceIDs["Conflict 2"]) + createConflict(tangle, "Conflict 4", conflictIDs, set.NewAdvancedSet[utxo.TransactionID](), resourceIDs["Conflict 2"]) - createConflict(t, tangle, "Conflict 1.1", conflictIDs, conflictIDs["Conflict 1"], resourceIDs["Conflict 3"]) - createConflict(t, tangle, "Conflict 1.2", conflictIDs, conflictIDs["Conflict 1"], resourceIDs["Conflict 3"]) - createConflict(t, tangle, "Conflict 1.3", conflictIDs, conflictIDs["Conflict 1"], resourceIDs["Conflict 3"]) + createConflict(tangle, "Conflict 1.1", conflictIDs, conflictIDs["Conflict 1"], resourceIDs["Conflict 3"]) + createConflict(tangle, "Conflict 1.2", conflictIDs, conflictIDs["Conflict 1"], resourceIDs["Conflict 3"]) + createConflict(tangle, "Conflict 1.3", conflictIDs, conflictIDs["Conflict 1"], 
resourceIDs["Conflict 3"]) - createConflict(t, tangle, "Conflict 4.1", conflictIDs, conflictIDs["Conflict 4"], resourceIDs["Conflict 4"]) - createConflict(t, tangle, "Conflict 4.2", conflictIDs, conflictIDs["Conflict 4"], resourceIDs["Conflict 4"]) + createConflict(tangle, "Conflict 4.1", conflictIDs, conflictIDs["Conflict 4"], resourceIDs["Conflict 4"]) + createConflict(tangle, "Conflict 4.2", conflictIDs, conflictIDs["Conflict 4"], resourceIDs["Conflict 4"]) - createConflict(t, tangle, "Conflict 4.1.1", conflictIDs, conflictIDs["Conflict 4.1"], resourceIDs["Conflict 5"]) - createConflict(t, tangle, "Conflict 4.1.2", conflictIDs, conflictIDs["Conflict 4.1"], resourceIDs["Conflict 5"]) + createConflict(tangle, "Conflict 4.1.1", conflictIDs, conflictIDs["Conflict 4.1"], resourceIDs["Conflict 5"]) + createConflict(tangle, "Conflict 4.1.2", conflictIDs, conflictIDs["Conflict 4.1"], resourceIDs["Conflict 5"]) conflictIDs["Conflict 1.1 + Conflict 4.1.1"] = set.NewAdvancedSet[utxo.TransactionID]() conflictIDs["Conflict 1.1 + Conflict 4.1.1"].AddAll(conflictIDs["Conflict 1.1"]) @@ -250,12 +256,15 @@ func TestApprovalWeightManager_updateSequenceVoters(t *testing.T) { manaRetrieverMock := func() map[identity.ID]float64 { m := make(map[identity.ID]float64) for _, s := range voters { - weightProvider.Update(time.Now(), s.ID()) + ei := epoch.IndexFromTime(time.Now()) + + weightProvider.Update(ei, s.ID()) m[s.ID()] = 100 } return m } - weightProvider = NewCManaWeightProvider(manaRetrieverMock, time.Now) + confirmedRetrieverFunc := func() epoch.Index { return 0 } + weightProvider = NewCManaWeightProvider(manaRetrieverMock, time.Now, confirmedRetrieverFunc) tangle := NewTestTangle(ApprovalWeights(weightProvider)) defer tangle.Shutdown() @@ -402,7 +411,8 @@ func TestAggregatedConflictApproval(t *testing.T) { var weightProvider *CManaWeightProvider manaRetrieverMock := func() map[identity.ID]float64 { for _, node := range nodes { - weightProvider.Update(time.Now(), node.ID()) + ei 
:= epoch.IndexFromTime(time.Now()) + weightProvider.Update(ei, node.ID()) } return map[identity.ID]float64{ nodes["A"].ID(): 30, @@ -412,7 +422,8 @@ func TestAggregatedConflictApproval(t *testing.T) { nodes["E"].ID(): 10, } } - weightProvider = NewCManaWeightProvider(manaRetrieverMock, time.Now) + confirmedRetrieverFunc := func() epoch.Index { return 0 } + weightProvider = NewCManaWeightProvider(manaRetrieverMock, time.Now, confirmedRetrieverFunc) tangle := NewTestTangle(ApprovalWeights(weightProvider)) defer tangle.Shutdown() @@ -494,7 +505,8 @@ func TestOutOfOrderStatements(t *testing.T) { var weightProvider *CManaWeightProvider manaRetrieverMock := func() map[identity.ID]float64 { for _, node := range nodes { - weightProvider.Update(time.Now(), node.ID()) + ei := epoch.IndexFromTime(time.Now()) + weightProvider.Update(ei, node.ID()) } return map[identity.ID]float64{ nodes["A"].ID(): 30, @@ -504,7 +516,8 @@ func TestOutOfOrderStatements(t *testing.T) { nodes["E"].ID(): 10, } } - weightProvider = NewCManaWeightProvider(manaRetrieverMock, time.Now) + confirmedRetrieverFunc := func() epoch.Index { return 0 } + weightProvider = NewCManaWeightProvider(manaRetrieverMock, time.Now, confirmedRetrieverFunc) tangle := NewTestTangle(ApprovalWeights(weightProvider), WithConflictDAGOptions(conflictdag.WithMergeToMaster(false))) tangle.Booker.MarkersManager.Options.MaxPastMarkerDistance = 3 @@ -884,7 +897,7 @@ func getSingleConflict(conflictes map[string]*set.AdvancedSet[utxo.TransactionID return utxo.EmptyTransactionID } -func createConflict(t *testing.T, tangle *Tangle, conflictAlias string, conflictIDs map[string]*set.AdvancedSet[utxo.TransactionID], parentConflictIDs *set.AdvancedSet[utxo.TransactionID], conflictID utxo.OutputID) { +func createConflict(tangle *Tangle, conflictAlias string, conflictIDs map[string]*set.AdvancedSet[utxo.TransactionID], parentConflictIDs *set.AdvancedSet[utxo.TransactionID], conflictID utxo.OutputID) { conflict := 
getSingleConflict(conflictIDs, conflictAlias) tangle.Ledger.ConflictDAG.CreateConflict(conflict, parentConflictIDs, set.NewAdvancedSet(conflictID)) conflict.RegisterAlias(conflictAlias) diff --git a/packages/core/tangleold/block.go b/packages/core/tangleold/block.go index 087e253456..9142d7bd01 100644 --- a/packages/core/tangleold/block.go +++ b/packages/core/tangleold/block.go @@ -357,7 +357,7 @@ func (m BlockIDs) String() string { result := "BlockIDs{\n" for blockID := range m { - result += strings.Repeat(" ", stringify.INDENTATION_SIZE) + blockID.String() + ",\n" + result += strings.Repeat(" ", stringify.IndentationSize) + blockID.String() + ",\n" } result += "}" @@ -386,7 +386,7 @@ type BlockModel struct { IssuingTime time.Time `serix:"3"` SequenceNumber uint64 `serix:"4"` PayloadBytes []byte `serix:"5,lengthPrefixType=uint32"` - EI epoch.Index `serix:"6"` + ECRecordEI epoch.Index `serix:"6"` ECR epoch.ECR `serix:"7"` PrevEC epoch.EC `serix:"8"` LatestConfirmedEpoch epoch.Index `serix:"9"` @@ -410,7 +410,7 @@ func NewBlock(references ParentBlockIDs, issuingTime time.Time, issuerPublicKey IssuingTime: issuingTime, SequenceNumber: sequenceNumber, PayloadBytes: lo.PanicOnErr(blkPayload.Bytes()), - EI: ecRecord.EI(), + ECRecordEI: ecRecord.EI(), ECR: ecRecord.ECR(), PrevEC: ecRecord.PrevEC(), LatestConfirmedEpoch: latestConfirmedEpoch, @@ -441,6 +441,14 @@ func NewBlockWithValidation(references ParentBlockIDs, issuingTime time.Time, is return blk, nil } +// FromBytes unmarshals a Block from a sequence of bytes. +func (m *Block) FromBytes(bytes []byte) (err error) { + if err = m.Storable.FromBytes(bytes); err != nil { + return + } + return m.DetermineID() +} + // VerifySignature verifies the Signature of the block. func (m *Block) VerifySignature() (valid bool, err error) { blkBytes, err := m.Bytes() @@ -548,9 +556,9 @@ func (m *Block) Nonce() uint64 { return m.M.Nonce } -// EI returns the EI of the block. 
-func (m *Block) EI() epoch.Index { - return m.M.EI +// ECRecordEI returns the EI of the ECRecord a block contains. +func (m *Block) ECRecordEI() epoch.Index { + return m.M.ECRecordEI } // ECR returns the ECR of the block. @@ -590,24 +598,24 @@ func (m *Block) Size() int { } func (m *Block) String() string { - builder := stringify.StructBuilder("Block", stringify.StructField("id", m.ID())) + builder := stringify.NewStructBuilder("Block", stringify.NewStructField("id", m.ID())) for index, parent := range sortParents(m.ParentsByType(StrongParentType)) { - builder.AddField(stringify.StructField(fmt.Sprintf("strongParent%d", index), parent.String())) + builder.AddField(stringify.NewStructField(fmt.Sprintf("strongParent%d", index), parent.String())) } for index, parent := range sortParents(m.ParentsByType(WeakParentType)) { - builder.AddField(stringify.StructField(fmt.Sprintf("weakParent%d", index), parent.String())) + builder.AddField(stringify.NewStructField(fmt.Sprintf("weakParent%d", index), parent.String())) } for index, parent := range sortParents(m.ParentsByType(ShallowLikeParentType)) { - builder.AddField(stringify.StructField(fmt.Sprintf("shallowlikeParent%d", index), parent.String())) + builder.AddField(stringify.NewStructField(fmt.Sprintf("shallowlikeParent%d", index), parent.String())) } - builder.AddField(stringify.StructField("Issuer", m.IssuerPublicKey())) - builder.AddField(stringify.StructField("IssuingTime", m.IssuingTime())) - builder.AddField(stringify.StructField("SequenceNumber", m.SequenceNumber())) - builder.AddField(stringify.StructField("Payload", m.Payload())) - builder.AddField(stringify.StructField("Nonce", m.Nonce())) - builder.AddField(stringify.StructField("Signature", m.Signature())) + builder.AddField(stringify.NewStructField("Issuer", m.IssuerPublicKey())) + builder.AddField(stringify.NewStructField("IssuingTime", m.IssuingTime())) + builder.AddField(stringify.NewStructField("SequenceNumber", m.SequenceNumber())) + 
builder.AddField(stringify.NewStructField("Payload", m.Payload())) + builder.AddField(stringify.NewStructField("Nonce", m.Nonce())) + builder.AddField(stringify.NewStructField("Signature", m.Signature())) return builder.String() } diff --git a/packages/core/tangleold/block_test.go b/packages/core/tangleold/block_test.go index d6cd2a3902..8f054e534b 100644 --- a/packages/core/tangleold/block_test.go +++ b/packages/core/tangleold/block_test.go @@ -655,7 +655,7 @@ func TestBlockFromBytes(t *testing.T) { } func randomTransaction() *devnetvm.Transaction { - ID, _ := identity.RandomID() + ID, _ := identity.RandomIDInsecure() input := devnetvm.NewUTXOInput(utxo.EmptyOutputID) var outputs devnetvm.Outputs seed := ed25519.NewSeed() diff --git a/packages/core/tangleold/cmanaweightprovider.go b/packages/core/tangleold/cmanaweightprovider.go index 97f62efa26..7c75cfb017 100644 --- a/packages/core/tangleold/cmanaweightprovider.go +++ b/packages/core/tangleold/cmanaweightprovider.go @@ -1,53 +1,49 @@ package tangleold import ( - "container/heap" - "context" - "fmt" - "strings" + "github.com/iotaledger/goshimmer/packages/core/epoch" + "github.com/iotaledger/hive.go/core/generics/shrinkingmap" + "github.com/iotaledger/hive.go/core/types" "sync" "time" "github.com/cockroachdb/errors" - "github.com/iotaledger/hive.go/core/generics/set" "github.com/iotaledger/hive.go/core/identity" "github.com/iotaledger/hive.go/core/kvstore" - "github.com/iotaledger/hive.go/core/serix" ) -func init() { - err := serix.DefaultAPI.RegisterTypeSettings(NodesActivityLog{}, serix.TypeSettings{}.WithLengthPrefixType(serix.LengthPrefixTypeAsUint32)) - if err != nil { - panic(fmt.Errorf("error registering GenericDataPayload type settings: %w", err)) - } -} - const ( - activeTimeThreshold = 5 * time.Minute minimumManaThreshold = 0 activeNodesKey = "WeightProviderActiveNodes" + // activeEpochThreshold defines the activity window in number of epochs. 
+ activeEpochThreshold = 15 ) // region CManaWeightProvider ////////////////////////////////////////////////////////////////////////////////////////// -type NodesActivityLog map[identity.ID]*ActivityLog +// ActivityUpdatesCount stores the counters on how many times activity record was updated. +type ActivityUpdatesCount map[identity.ID]uint64 // CManaWeightProvider is a WeightProvider for consensus mana. It keeps track of active nodes based on their time-based // activity in relation to activeTimeThreshold. type CManaWeightProvider struct { - store kvstore.KVStore - mutex sync.RWMutex - activeNodes NodesActivityLog - manaRetrieverFunc ManaRetrieverFunc - timeRetrieverFunc TimeRetrieverFunc + store kvstore.KVStore + mutex sync.RWMutex + activityLog *epoch.NodesActivityLog + updatedActivityCount *shrinkingmap.ShrinkingMap[epoch.Index, ActivityUpdatesCount] + manaRetrieverFunc ManaRetrieverFunc + timeRetrieverFunc TimeRetrieverFunc + confirmedEpochRetrieverFunc ConfirmedEpochRetrieverFunc } // NewCManaWeightProvider is the constructor for CManaWeightProvider. 
-func NewCManaWeightProvider(manaRetrieverFunc ManaRetrieverFunc, timeRetrieverFunc TimeRetrieverFunc, store ...kvstore.KVStore) (cManaWeightProvider *CManaWeightProvider) { +func NewCManaWeightProvider(manaRetrieverFunc ManaRetrieverFunc, timeRetrieverFunc TimeRetrieverFunc, confirmedEpochRetrieverFunc ConfirmedEpochRetrieverFunc, store ...kvstore.KVStore) (cManaWeightProvider *CManaWeightProvider) { cManaWeightProvider = &CManaWeightProvider{ - activeNodes: make(NodesActivityLog), - manaRetrieverFunc: manaRetrieverFunc, - timeRetrieverFunc: timeRetrieverFunc, + activityLog: epoch.NewNodesActivityLog(), + updatedActivityCount: shrinkingmap.New[epoch.Index, ActivityUpdatesCount](shrinkingmap.WithShrinkingThresholdCount(100)), + manaRetrieverFunc: manaRetrieverFunc, + timeRetrieverFunc: timeRetrieverFunc, + confirmedEpochRetrieverFunc: confirmedEpochRetrieverFunc, } if len(store) == 0 { @@ -62,7 +58,8 @@ func NewCManaWeightProvider(manaRetrieverFunc ManaRetrieverFunc, timeRetrieverFu } // Load from storage if key was found. if marshaledActiveNodes != nil { - if cManaWeightProvider.activeNodes, err = activeNodesFromBytes(marshaledActiveNodes); err != nil { + + if err = cManaWeightProvider.activityLog.FromBytes(marshaledActiveNodes); err != nil { panic(err) } return @@ -72,24 +69,42 @@ func NewCManaWeightProvider(manaRetrieverFunc ManaRetrieverFunc, timeRetrieverFu } // Update updates the underlying data structure and keeps track of active nodes. -func (c *CManaWeightProvider) Update(t time.Time, nodeID identity.ID) { - // We only want to log node activity that is relevant, i.e., node activity before TangleTime-activeTimeThreshold - // does not matter anymore since the TangleTime advances towards the present/future. 
- staleThreshold := c.timeRetrieverFunc().Add(-activeTimeThreshold) - if t.Before(staleThreshold) { - return - } - +func (c *CManaWeightProvider) Update(ei epoch.Index, nodeID identity.ID) { + // We don't check if the epoch index is too old, as this is handled by the NotarizationManager c.mutex.Lock() defer c.mutex.Unlock() - a, exists := c.activeNodes[nodeID] + a, exists := c.activityLog.Get(ei) if !exists { - a = NewActivityLog() - c.activeNodes[nodeID] = a + a = epoch.NewActivityLog() + c.activityLog.Set(ei, a) } - a.Add(t) + a.Add(nodeID) + + c.updateActivityCount(ei, nodeID, 1) +} + +// Remove updates the underlying data structure by decreasing updatedActivityCount and removing node from active list if no activity left. +func (c *CManaWeightProvider) Remove(ei epoch.Index, nodeID identity.ID, updatedActivityCount uint64) (removed bool) { + c.mutex.Lock() + defer c.mutex.Unlock() + + epochUpdatesCount, exist := c.updatedActivityCount.Get(ei) + if exist { + _, exists := epochUpdatesCount[nodeID] + if exists { + epochUpdatesCount[nodeID] -= updatedActivityCount + } + } + // if that was the last activity for this node in the ei epoch, then remove it from activity list + if epochUpdatesCount[nodeID] == 0 { + if a, exists := c.activityLog.Get(ei); exists { + a.Remove(nodeID) + return true + } + } + return false } // Weight returns the weight and total weight for the given block. @@ -103,54 +118,96 @@ func (c *CManaWeightProvider) WeightsOfRelevantVoters() (weights map[identity.ID weights = make(map[identity.ID]float64) mana := c.manaRetrieverFunc() - targetTime := c.timeRetrieverFunc() - lowerBoundTargetTime := targetTime.Add(-activeTimeThreshold) - c.mutex.Lock() - defer c.mutex.Unlock() - for nodeID, al := range c.activeNodes { - nodeMana := mana[nodeID] + lowerBoundEpoch, upperBoundEpoch := c.activityBoundaries() - // Determine whether node was active in time window. 
- if active, empty := al.Active(lowerBoundTargetTime, targetTime); !active { - if empty { - delete(c.activeNodes, nodeID) - } - continue - } + c.mutex.Lock() + defer c.mutex.Unlock() - // Do this check after determining whether a node was active because otherwise we would never clean up - // the ActivityLog of nodes lower than the threshold. - // Skip node if it does not fulfill minimumManaThreshold. - if nodeMana <= minimumManaThreshold { + // nodes mana is counted only once for total weight calculation + totalWeightOnce := make(map[identity.ID]types.Empty) + for ei := lowerBoundEpoch; ei <= upperBoundEpoch; ei++ { + al, exists := c.activityLog.Get(ei) + if !exists { continue } + al.ForEach(func(nodeID identity.ID) error { + nodeMana := mana[nodeID] + // Do this check after determining whether a node was active because otherwise we would never clean up + // the ActivityLog of nodes lower than the threshold. + // Skip node if it does not fulfill minimumManaThreshold. + if nodeMana <= minimumManaThreshold { + return nil + } - weights[nodeID] = nodeMana - totalWeight += nodeMana + weights[nodeID] = nodeMana + if _, notFirstTime := totalWeightOnce[nodeID]; !notFirstTime { + totalWeight += nodeMana + totalWeightOnce[nodeID] = types.Void + } + return nil + }) } + pruningPoint := c.confirmedEpochRetrieverFunc() - epoch.Index(activeEpochThreshold) + c.clean(pruningPoint) + return weights, totalWeight } +// SnapshotEpochActivity returns the activity log for snapshotting. 
+func (c *CManaWeightProvider) SnapshotEpochActivity(epochDiffIndex epoch.Index) (epochActivity epoch.SnapshotEpochActivity) { + epochActivity = epoch.NewSnapshotEpochActivity() + + c.mutex.Lock() + defer c.mutex.Unlock() + + c.activityLog.ForEach(func(ei epoch.Index, activity *epoch.ActivityLog) bool { + activity.ForEach(func(nodeID identity.ID) error { + // we save only activity log up to epochDiffIndex as it is the last snapshotted epoch + if ei > epochDiffIndex { + return nil + } + if _, ok := epochActivity[ei]; !ok { + epochActivity[ei] = epoch.NewSnapshotNodeActivity() + } + // Snapshot activity counts + activityCount, exists := c.updatedActivityCount.Get(ei) + if exists { + epochActivity[ei].SetNodeActivity(nodeID, activityCount[nodeID]) + } + return nil + }) + return true + }) + + return +} + // Shutdown shuts down the WeightProvider and persists its state. func (c *CManaWeightProvider) Shutdown() { if c.store != nil { - _ = c.store.Set(kvstore.Key(activeNodesKey), activeNodesToBytes(c.ActiveNodes())) + activeNodes := c.activeNodes() + _ = c.store.Set(kvstore.Key(activeNodesKey), activeNodes.Bytes()) } } -// ActiveNodes returns the map of the active nodes. -func (c *CManaWeightProvider) ActiveNodes() (activeNodes NodesActivityLog) { - activeNodes = make(NodesActivityLog) - +// LoadActiveNodes loads the activity log to weight provider. 
+func (c *CManaWeightProvider) LoadActiveNodes(loadedActiveNodes epoch.SnapshotEpochActivity) { c.mutex.Lock() defer c.mutex.Unlock() - for nodeID, al := range c.activeNodes { - activeNodes[nodeID] = al.Clone() + for ei, epochActivity := range loadedActiveNodes { + var activityLog *epoch.ActivityLog + var ok bool + if activityLog, ok = c.activityLog.Get(ei); !ok { + activityLog = epoch.NewActivityLog() + c.activityLog.Set(ei, activityLog) + } + for nodeID, activityCount := range epochActivity.NodesLog() { + activityLog.Add(nodeID) + c.updateActivityCount(ei, nodeID, activityCount) + } } - - return activeNodes } // ManaRetrieverFunc is a function type to retrieve consensus mana (e.g. via the mana plugin). @@ -159,190 +216,58 @@ type ManaRetrieverFunc func() map[identity.ID]float64 // TimeRetrieverFunc is a function type to retrieve the time. type TimeRetrieverFunc func() time.Time -// endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// +// ConfirmedEpochRetrieverFunc is a function type to retrieve the confirmed epoch index. +type ConfirmedEpochRetrieverFunc func() epoch.Index -// region activeNodes ////////////////////////////////////////////////////////////////////////////////////////////////// +// activeNodes returns the map of the active nodes. +func (c *CManaWeightProvider) activeNodes() (activeNodes *epoch.NodesActivityLog) { + activeNodes = epoch.NewNodesActivityLog() -func activeNodesFromBytes(data []byte) (activeNodes NodesActivityLog, err error) { - _, err = serix.DefaultAPI.Decode(context.Background(), data, &activeNodes, serix.WithValidation()) - if err != nil { - err = errors.Errorf("failed to parse activeNodes: %w", err) - return - } - return -} - -func activeNodesToBytes(activeNodes NodesActivityLog) []byte { - objBytes, err := serix.DefaultAPI.Encode(context.Background(), activeNodes, serix.WithValidation()) - if err != nil { - // TODO: what do? 
- panic(err) - } - return objBytes -} - -// endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// - -// region ActivityLog ////////////////////////////////////////////////////////////////////////////////////////////////// - -// granularity defines the granularity in seconds with which we log node activities. -const granularity = 60 - -// timeToUnixGranularity converts a time t to a unix timestamp with granularity. -func timeToUnixGranularity(t time.Time) int64 { - return t.Unix() / granularity -} - -// ActivityLog is a time-based log of node activity. It stores information when a node was active and provides -// functionality to query for certain timeframes. -type ActivityLog struct { - setTimes set.Set[int64] `serix:"0,lengthPrefixType=uint32"` - times *minHeap -} - -// NewActivityLog is the constructor for ActivityLog. -func NewActivityLog() *ActivityLog { - var mh minHeap + c.mutex.Lock() + defer c.mutex.Unlock() - a := &ActivityLog{ - setTimes: set.New[int64](), - times: &mh, - } - heap.Init(a.times) + c.activityLog.ForEach(func(ei epoch.Index, activity *epoch.ActivityLog) bool { + activeNodes.Set(ei, activity.Clone()) + return true + }) - return a + return activeNodes } -// Add adds a node activity to the log. -func (a *ActivityLog) Add(t time.Time) (added bool) { - u := timeToUnixGranularity(t) - if !a.setTimes.Add(u) { - return false +func (c *CManaWeightProvider) activityBoundaries() (lowerBoundEpoch, upperBoundEpoch epoch.Index) { + currentTime := c.timeRetrieverFunc() + upperBoundEpoch = epoch.IndexFromTime(currentTime) + lowerBoundEpoch = upperBoundEpoch - activeEpochThreshold + if lowerBoundEpoch < 0 { + lowerBoundEpoch = 0 } - - heap.Push(a.times, u) - return true + return } -// Active returns true if the node was active between lower and upper bound. -// It cleans up the log on the fly, meaning that old/stale times are deleted. 
-// If the log ends up empty after cleaning up, empty is set to true. -func (a *ActivityLog) Active(lowerBound, upperBound time.Time) (active, empty bool) { - lb, ub := timeToUnixGranularity(lowerBound), timeToUnixGranularity(upperBound) - - for a.times.Len() > 0 { - // Get the lowest element of the min-heap = the earliest time. - earliestActivity := (*a.times)[0] - - // We clean up possible stale times < lowerBound because we don't need them anymore. - if earliestActivity < lb { - a.setTimes.Delete(earliestActivity) - heap.Pop(a.times) - continue +// clean removes all activity logs for epochs lower than provided bound. +func (c *CManaWeightProvider) clean(cutoffEpoch epoch.Index) { + c.activityLog.ForEachKey(func(ei epoch.Index) bool { + if ei < cutoffEpoch { + c.activityLog.Delete(ei) } - - // Check if time is between lower and upper bound. Because of cleanup, earliestActivity >= lb is implicitly given. - if earliestActivity <= ub { - return true, false + return true + }) + // clean also the updates counting map + c.updatedActivityCount.ForEach(func(ei epoch.Index, count ActivityUpdatesCount) bool { + if ei < cutoffEpoch { + c.updatedActivityCount.Delete(ei) } - // Otherwise, the node has active times in the future of upperBound but is not currently active. - return false, false - } - - // If the heap is empty, there's no activity anymore and the object might potentially be cleaned up. - return false, true -} - -// Times returns all times stored in this ActivityLog. -func (a *ActivityLog) Times() (times []int64) { - times = make([]int64, 0, a.times.Len()) - - for _, u := range *a.times { - times = append(times, u) - } - - return times -} - -// String returns a human-readable version of ActivityLog. 
-func (a *ActivityLog) String() string { - var builder strings.Builder - builder.WriteString(fmt.Sprintf("ActivityLog(len=%d, elements=", a.times.Len())) - for _, u := range *a.times { - builder.WriteString(fmt.Sprintf("%d, ", u)) - } - builder.WriteString(")") - return builder.String() -} - -// Clone clones the ActivityLog. -func (a *ActivityLog) Clone() *ActivityLog { - clone := NewActivityLog() - - for _, u := range *a.times { - clone.setTimes.Add(u) - heap.Push(clone.times, u) - } - - return clone -} - -// Encode ActivityLog a serialized byte slice of the object. -func (a *ActivityLog) Encode() ([]byte, error) { - objBytes, err := serix.DefaultAPI.Encode(context.Background(), a.setTimes, serix.WithValidation()) - if err != nil { - // TODO: what do? - panic(err) - } - return objBytes, nil -} - -// Decode deserializes bytes into a valid object. -func (a *ActivityLog) Decode(data []byte) (bytesRead int, err error) { - var mh minHeap - - a.setTimes = set.New[int64]() - a.times = &mh - bytesRead, err = serix.DefaultAPI.Decode(context.Background(), data, &a.setTimes, serix.WithValidation()) - if err != nil { - err = errors.Errorf("failed to parse ActivityLog: %w", err) - return - } - a.setTimes.ForEach(func(time int64) { - heap.Push(a.times, time) + return true }) - return } -// minHeap is an int64 min heap. -type minHeap []int64 - -// Len is the number of elements in the collection. -func (h minHeap) Len() int { - return len(h) -} - -// Less reports whether the element with index i must sort before the element with index j. -func (h minHeap) Less(i, j int) bool { - return h[i] < h[j] -} - -// Swap swaps the elements with indexes i and j. -func (h minHeap) Swap(i, j int) { - h[i], h[j] = h[j], h[i] -} - -// Push pushes the element x onto the heap. -func (h *minHeap) Push(x interface{}) { - *h = append(*h, x.(int64)) -} - -// Pop removes and returns the minimum element (according to Less) from the heap. 
-func (h *minHeap) Pop() interface{} { - n := len(*h) - x := (*h)[n-1] - *h = (*h)[:n-1] - return x +func (c *CManaWeightProvider) updateActivityCount(ei epoch.Index, nodeID identity.ID, increase uint64) { + _, exist := c.updatedActivityCount.Get(ei) + if !exist { + c.updatedActivityCount.Set(ei, make(ActivityUpdatesCount)) + } + epochUpdatesCount, _ := c.updatedActivityCount.Get(ei) + epochUpdatesCount[nodeID] += increase } // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/packages/core/tangleold/cmanaweightprovider_test.go b/packages/core/tangleold/cmanaweightprovider_test.go index 90a9076d42..f608f4e5fb 100644 --- a/packages/core/tangleold/cmanaweightprovider_test.go +++ b/packages/core/tangleold/cmanaweightprovider_test.go @@ -1,10 +1,10 @@ package tangleold import ( + "math/rand" "testing" "time" - "github.com/iotaledger/hive.go/core/crypto" "github.com/iotaledger/hive.go/core/identity" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -19,23 +19,32 @@ func TestActiveNodesMarshalling(t *testing.T) { "node3": identity.GenerateIdentity().ID(), } - activeNodes := make(map[identity.ID]*ActivityLog) - for _, nodeID := range nodes { - a := NewActivityLog() - - for i := 0; i < crypto.Randomness.Intn(100); i++ { - a.Add(time.Now().Add(time.Duration(i)*time.Minute + time.Hour)) + activeNodes := epoch.NewNodesActivityLog() + + for i := 0; i < 100; i++ { + ei := epoch.Index(i) + al := epoch.NewActivityLog() + activeNodes.Set(ei, al) + weight := 1.0 + for _, nodeID := range nodes { + if rand.Float64() < 0.1*weight { + al.Add(nodeID) + } + weight += 1 } - - activeNodes[nodeID] = a } - activeNodesBytes := activeNodesToBytes(activeNodes) - activeNodes2, err := activeNodesFromBytes(activeNodesBytes) - require.NoError(t, err) - for nodeID, a := range activeNodes { - assert.EqualValues(t, a.setTimes.Size(), activeNodes2[nodeID].setTimes.Size()) - } + activeNodesBytes 
:= activeNodes.Bytes() + require.NotNil(t, activeNodesBytes) + activeNodes2 := epoch.NewNodesActivityLog() + err := activeNodes2.FromBytes(activeNodesBytes) + require.NoError(t, err) + activeNodes.ForEach(func(ei epoch.Index, activity *epoch.ActivityLog) bool { + activity2, exists := activeNodes2.Get(ei) + require.True(t, exists) + assert.EqualValues(t, activity.Size(), activity2.Size()) + return true + }) } func TestCManaWeightProvider(t *testing.T) { @@ -53,31 +62,38 @@ func TestCManaWeightProvider(t *testing.T) { } } - tangleTime := time.Unix(epoch.GenesisTime, 0) - timeRetrieverFunc := func() time.Time { return tangleTime } - - weightProvider := NewCManaWeightProvider(manaRetrieverFunc, timeRetrieverFunc) + startEpoch := epoch.IndexFromTime(time.Now()) + epochManager := &struct { + ei epoch.Index + }{ + ei: startEpoch, + } + epochRetrieverFunc := func() epoch.Index { return epochManager.ei } + timeRetrieverFunc := func() time.Time { return epochRetrieverFunc().StartTime() } + confirmedRetrieverFunc := func() epoch.Index { return 0 } + weightProvider := NewCManaWeightProvider(manaRetrieverFunc, timeRetrieverFunc, confirmedRetrieverFunc) - // Add node1 as active in the genesis. + // Add node1 as active in the genesis epoch. { - weightProvider.Update(tangleTime, nodes["1"]) + weightProvider.Update(epochRetrieverFunc(), nodes["1"]) assertWeightsOfRelevantVoters(t, weightProvider, nodes, map[string]float64{ "1": 20, }) } - // Add node2 and node3 activity at tangleTime+20 -> only node1 is active. + // Add node2 and node3 activity at epoch == 2 -> only node1 is active. { - weightProvider.Update(tangleTime.Add(3*time.Minute), nodes["2"]) - weightProvider.Update(tangleTime.Add(3*time.Minute), nodes["3"]) + weightProvider.Update(epochRetrieverFunc()+1, nodes["2"]) + weightProvider.Update(epochRetrieverFunc()+1, nodes["3"]) assertWeightsOfRelevantVoters(t, weightProvider, nodes, map[string]float64{ "1": 20, }) } - // Advance TangleTime by 4min -> all nodes are active. 
+ // Advance LatestCommittableEpoch by one epoch -> all nodes are active. { - tangleTime = tangleTime.Add(4 * time.Minute) + + epochManager.ei = epochRetrieverFunc() + 1 assertWeightsOfRelevantVoters(t, weightProvider, nodes, map[string]float64{ "1": 20, "2": 50, @@ -85,18 +101,18 @@ func TestCManaWeightProvider(t *testing.T) { }) } - // Advance TangleTime by 10min -> node1 and node2 are active. + // Advance LatestCommittableEpoch by two epochs -> node1 and node2 are active. { - tangleTime = tangleTime.Add(2 * time.Minute) + epochManager.ei = epochRetrieverFunc() + activeEpochThreshold assertWeightsOfRelevantVoters(t, weightProvider, nodes, map[string]float64{ "2": 50, "3": 30, }) } - // Advance tangleTime by 25min -> no node is active anymore. + // Advance LatestCommittableEpoch by three epochs -> no node is active anymore. { - tangleTime = tangleTime.Add(4 * time.Minute) + epochManager.ei = epochRetrieverFunc() + 2 assertWeightsOfRelevantVoters(t, weightProvider, nodes, map[string]float64{}) } } diff --git a/packages/core/tangleold/payload/payload.go b/packages/core/tangleold/payload/payload.go index 7e5809ae51..e98359fc0d 100644 --- a/packages/core/tangleold/payload/payload.go +++ b/packages/core/tangleold/payload/payload.go @@ -10,7 +10,7 @@ import ( // MaxSize = MaxBlockSize - // (version(1) + parentsBlocksCount(1) + 3 * (parentsType(1) + parentsCount(1) + 8 * reference(40)) + // issuerPK(32) + issuanceTime(8) + seqNum(8) + payloadLength(4) + -// + EI(8) + ECR(32) + PrevEC(32) + LatestConfirmedEpoch(8) +// + ECRecordI(8) + ECR(32) + PrevEC(32) + LatestConfirmedEpoch(8) // + nonce(8) + signature(64) // = MaxBlockSize - 1172 bytes = 64364 const MaxSize = 65536 - 1172 diff --git a/packages/core/tangleold/scenarios.go b/packages/core/tangleold/scenarios.go index 2f02efba46..c4af683060 100644 --- a/packages/core/tangleold/scenarios.go +++ b/packages/core/tangleold/scenarios.go @@ -8,6 +8,7 @@ import ( "github.com/iotaledger/hive.go/core/identity" 
"github.com/stretchr/testify/require" + "github.com/iotaledger/goshimmer/packages/core/epoch" "github.com/iotaledger/goshimmer/packages/core/markers" ) @@ -79,7 +80,7 @@ func ProcessBlockScenario(t *testing.T, options ...Option) *TestScenario { var weightProvider *CManaWeightProvider manaRetrieverMock := func() map[identity.ID]float64 { for _, node := range s.nodes { - weightProvider.Update(time.Now(), node.ID()) + weightProvider.Update(1, node.ID()) } return map[identity.ID]float64{ s.nodes["A"].ID(): 30, @@ -89,7 +90,11 @@ func ProcessBlockScenario(t *testing.T, options ...Option) *TestScenario { s.nodes["E"].ID(): 10, } } - weightProvider = NewCManaWeightProvider(manaRetrieverMock, time.Now) + testEpoch := epoch.IndexFromTime(time.Now()) + epochRetrieverFunc := func() epoch.Index { return testEpoch } + timeProvider := func() time.Time { return epochRetrieverFunc().StartTime() } + confirmedRetrieverFunc := func() epoch.Index { return 0 } + weightProvider = NewCManaWeightProvider(manaRetrieverMock, timeProvider, confirmedRetrieverFunc) s.Tangle = NewTestTangle(append([]Option{ ApprovalWeights(weightProvider), @@ -100,9 +105,14 @@ func ProcessBlockScenario(t *testing.T, options ...Option) *TestScenario { s.testEventMock = NewEventMock(t, s.Tangle.ApprovalWeightManager) s.TestFramework = NewBlockTestFramework(s.Tangle, WithGenesisOutput("A", 500)) + s.Steps = []TestStep{ // ISSUE Block1 func(t *testing.T, testFramework *BlockTestFramework, testEventMock *EventMock, nodes NodeIdentities) { + // Make all nodes active + for node := range nodes { + weightProvider.Update(epochRetrieverFunc(), nodes[node].ID()) + } testFramework.CreateBlock("Block1", WithStrongParents("Genesis"), WithIssuer(nodes["A"].PublicKey())) testEventMock.Expect("MarkerWeightChanged", markers.NewMarker(0, 1), 0.3) @@ -437,7 +447,7 @@ func ProcessBlockScenario2(t *testing.T, options ...Option) *TestScenario { var weightProvider *CManaWeightProvider manaRetrieverMock := func() map[identity.ID]float64 
{ for _, node := range s.nodes { - weightProvider.Update(time.Now(), node.ID()) + weightProvider.Update(epoch.Index(1), node.ID()) } return map[identity.ID]float64{ s.nodes["A"].ID(): 30, @@ -447,7 +457,12 @@ func ProcessBlockScenario2(t *testing.T, options ...Option) *TestScenario { s.nodes["E"].ID(): 10, } } - weightProvider = NewCManaWeightProvider(manaRetrieverMock, time.Now) + testEpoch := epoch.IndexFromTime(time.Now()) + epochRetrieverFunc := func() epoch.Index { return testEpoch } + timeProvider := func() time.Time { return epochRetrieverFunc().StartTime() } + confirmedRetrieverFunc := func() epoch.Index { return 0 } + + weightProvider = NewCManaWeightProvider(manaRetrieverMock, timeProvider, confirmedRetrieverFunc) s.Tangle = NewTestTangle(append([]Option{ ApprovalWeights(weightProvider), @@ -460,6 +475,10 @@ func ProcessBlockScenario2(t *testing.T, options ...Option) *TestScenario { s.Steps = []TestStep{ // ISSUE Block0 func(t *testing.T, testFramework *BlockTestFramework, testEventMock *EventMock, nodes NodeIdentities) { + // Make all nodes active + for node := range nodes { + weightProvider.Update(epochRetrieverFunc(), nodes[node].ID()) + } testFramework.CreateBlock("Block0", WithStrongParents("Genesis"), WithIssuer(nodes["A"].PublicKey())) testEventMock.Expect("MarkerWeightChanged", markers.NewMarker(0, 1), 0.30) diff --git a/packages/core/tangleold/scheduler.go b/packages/core/tangleold/scheduler.go index 1d141baf3b..c2e16f836a 100644 --- a/packages/core/tangleold/scheduler.go +++ b/packages/core/tangleold/scheduler.go @@ -296,24 +296,6 @@ func (s *Scheduler) GetManaFromCache(nodeID identity.ID) int64 { return int64(math.Ceil(s.AccessManaCache().GetCachedMana(nodeID))) } -// Clear removes all submitted blocks (ready or not) from the scheduler. -// The BlockDiscarded event is triggered for each of these blocks. 
-func (s *Scheduler) Clear() { - s.bufferMutex.Lock() - defer s.bufferMutex.Unlock() - - for q := s.buffer.Current(); q != nil; q = s.buffer.Next() { - s.buffer.RemoveNode(q.NodeID()) - for _, id := range q.IDs() { - blockID := blockIDFromElementID(id) - s.tangle.Storage.BlockMetadata(blockID).Consume(func(blockMetadata *BlockMetadata) { - blockMetadata.SetDiscardedTime(clock.SyncedTime()) - }) - s.Events.BlockDiscarded.Trigger(&BlockDiscardedEvent{blockID}) - } - } -} - // isEligible returns true if the given blockID has either been scheduled or confirmed. func (s *Scheduler) isEligible(blockID BlockID) (eligible bool) { s.tangle.Storage.BlockMetadata(blockID).Consume(func(blockMetadata *BlockMetadata) { @@ -529,9 +511,6 @@ loop: break loop } } - - // remove all unscheduled blocks - s.Clear() } func (s *Scheduler) GetDeficit(nodeID identity.ID) *big.Rat { diff --git a/packages/core/tangleold/scheduler_test.go b/packages/core/tangleold/scheduler_test.go index fb69468d5f..fbc4b40f7d 100644 --- a/packages/core/tangleold/scheduler_test.go +++ b/packages/core/tangleold/scheduler_test.go @@ -118,34 +118,6 @@ func TestScheduler_Discarded(t *testing.T) { }, 1*time.Second, 10*time.Millisecond) } -func TestScheduler_DiscardedAtShutdown(t *testing.T) { - tangle := NewTestTangle(Identity(selfLocalIdentity)) - defer tangle.Shutdown() - - blockDiscarded := make(chan BlockID, 1) - tangle.Scheduler.Events.BlockDiscarded.Hook(event.NewClosure(func(event *BlockDiscardedEvent) { - blockDiscarded <- event.BlockID - })) - - tangle.Scheduler.Start() - - blk := newBlock(selfNode.PublicKey()) - tangle.Storage.StoreBlock(blk) - assert.NoError(t, tangle.Scheduler.Submit(blk.ID())) - - time.Sleep(100 * time.Millisecond) - tangle.Scheduler.Shutdown() - - assert.Eventually(t, func() bool { - select { - case id := <-blockDiscarded: - return assert.Equal(t, blk.ID(), id) - default: - return false - } - }, 1*time.Second, 10*time.Millisecond) -} - func TestScheduler_SetRateBeforeStart(t 
*testing.T) { tangle := NewTestTangle(Identity(selfLocalIdentity)) defer tangle.Shutdown() diff --git a/packages/core/tangleold/storage.go b/packages/core/tangleold/storage.go index 7e9809649a..5ad1208506 100644 --- a/packages/core/tangleold/storage.go +++ b/packages/core/tangleold/storage.go @@ -370,7 +370,9 @@ func (s *Storage) deleteChild(parent Parent, approvingBlock BlockID) { // Shutdown marks the tangle as stopped, so it will not accept any new blocks (waits for all backgroundTasks to finish). func (s *Storage) Shutdown() { s.blockStorage.Shutdown() + fmt.Println("======= Shutdown block metadate starts") s.blockMetadataStorage.Shutdown() + fmt.Println("======= Shutdown block metadate ends") s.childStorage.Shutdown() s.missingBlockStorage.Shutdown() s.attachmentStorage.Shutdown() diff --git a/packages/core/tangleold/tangle.go b/packages/core/tangleold/tangle.go index ac917a4f1e..97b09c141f 100644 --- a/packages/core/tangleold/tangle.go +++ b/packages/core/tangleold/tangle.go @@ -344,7 +344,10 @@ func CommitmentFunc(commitmentRetrieverFunc func() (*epoch.ECRecord, epoch.Index // in a flexible way, independently of a specific implementation. type WeightProvider interface { // Update updates the underlying data structure and keeps track of active nodes. - Update(t time.Time, nodeID identity.ID) + Update(ei epoch.Index, nodeID identity.ID) + + // Remove updates the underlying data structure by removing node from active list if no activity left. + Remove(ei epoch.Index, nodeID identity.ID, decreaseBy uint64) (removed bool) // Weight returns the weight and total weight for the given block. Weight(block *Block) (weight, totalWeight float64) @@ -352,6 +355,12 @@ type WeightProvider interface { // WeightsOfRelevantVoters returns all relevant weights. WeightsOfRelevantVoters() (weights map[identity.ID]float64, totalWeight float64) + // SnapshotEpochActivity returns the activity log for snapshotting. 
+ SnapshotEpochActivity(epochDiffIndex epoch.Index) (epochActivity epoch.SnapshotEpochActivity) + + // LoadActiveNodes loads active nodes from the snapshot activity log. + LoadActiveNodes(loadedActiveNodes epoch.SnapshotEpochActivity) + // Shutdown shuts down the WeightProvider and persists its state. Shutdown() } diff --git a/packages/core/tangleold/testutils.go b/packages/core/tangleold/testutils.go index 0be6af86ec..0d6101ce48 100644 --- a/packages/core/tangleold/testutils.go +++ b/packages/core/tangleold/testutils.go @@ -171,7 +171,7 @@ func (m *BlockTestFramework) PreventNewMarkers(enabled bool) *BlockTestFramework } // LatestCommitment gets the latest commitment. -func (m *BlockTestFramework) LatestCommitment(blockAliases ...string) (ecRecord *epoch.ECRecord, latestConfirmedEpoch epoch.Index, err error) { +func (m *BlockTestFramework) LatestCommitment() (ecRecord *epoch.ECRecord, latestConfirmedEpoch epoch.Index, err error) { return m.tangle.Options.CommitmentFunc() } @@ -203,7 +203,7 @@ func (m *BlockTestFramework) Block(alias string) (block *Block) { return } -// Block retrieves the Blocks that is associated with the given alias. +// BlockIDs retrieves the Blocks that is associated with the given alias. 
func (m *BlockTestFramework) BlockIDs(aliases ...string) (blockIDs BlockIDs) { blockIDs = NewBlockIDs() for _, alias := range aliases { @@ -294,7 +294,7 @@ func (m *BlockTestFramework) createGenesisOutputs() { return } - manaPledgeID, err := identity.RandomID() + manaPledgeID, err := identity.RandomIDInsecure() if err != nil { panic(err) } @@ -310,11 +310,21 @@ func (m *BlockTestFramework) createGenesisOutputs() { outputWithMetadata := m.createOutput(alias, devnetvm.NewColoredBalances(coloredBalances), manaPledgeID, manaPledgeTime) outputsWithMetadata = append(outputsWithMetadata, outputWithMetadata) } + activeNodes := createActivityLog(manaPledgeTime, manaPledgeID) - m.snapshot = ledger.NewSnapshot(outputsWithMetadata) + m.snapshot = ledger.NewSnapshot(outputsWithMetadata, activeNodes) loadSnapshotToLedger(m.tangle.Ledger, m.snapshot) } +// createActivityLog create activity log and adds provided node for given time. +func createActivityLog(activityTime time.Time, nodeID identity.ID) epoch.SnapshotEpochActivity { + ei := epoch.IndexFromTime(activityTime) + activeNodes := make(epoch.SnapshotEpochActivity) + activeNodes[ei] = epoch.NewSnapshotNodeActivity() + activeNodes[ei].SetNodeActivity(nodeID, 1) + return activeNodes +} + func (m *BlockTestFramework) createOutput(alias string, coloredBalances *devnetvm.ColoredBalances, manaPledgeID identity.ID, manaPledgeTime time.Time) (outputWithMetadata *ledger.OutputWithMetadata) { addressWallet := createWallets(1)[0] m.walletsByAlias[alias] = addressWallet @@ -929,8 +939,21 @@ func (m *MockConfirmationOracle) Events() *ConfirmationEvents { // MockWeightProvider is a mock of a WeightProvider. type MockWeightProvider struct{} +func (m *MockWeightProvider) SnapshotEpochActivity(ei epoch.Index) (epochActivity epoch.SnapshotEpochActivity) { + return nil +} + +// LoadActiveNodes mocks its interface function. 
+func (m *MockWeightProvider) LoadActiveNodes(loadedActiveNodes epoch.SnapshotEpochActivity) { +} + // Update mocks its interface function. -func (m *MockWeightProvider) Update(t time.Time, nodeID identity.ID) { +func (m *MockWeightProvider) Update(ei epoch.Index, nodeID identity.ID) { +} + +// Remove mocks its interface function. +func (m *MockWeightProvider) Remove(ei epoch.Index, nodeID identity.ID, count uint64) (removed bool) { + return true } // Weight mocks its interface function. @@ -1072,8 +1095,10 @@ func (e *EventMock) BlockProcessed(event *BlockProcessedEvent) { // loadSnapshotToLedger loads a snapshot of the Ledger from the given snapshot. func loadSnapshotToLedger(l *ledger.Ledger, s *ledger.Snapshot) { l.LoadOutputWithMetadatas(s.OutputsWithMetadata) - err := l.LoadEpochDiffs(s.Header, s.EpochDiffs) - if err != nil { - panic("Failed to load epochDiffs from snapshot") + for _, diffs := range s.EpochDiffs { + err := l.LoadEpochDiff(diffs) + if err != nil { + panic("Failed to load epochDiffs from snapshot") + } } } diff --git a/packages/core/tangleold/timemanager.go b/packages/core/tangleold/timemanager.go index eb1fefba68..6929274f74 100644 --- a/packages/core/tangleold/timemanager.go +++ b/packages/core/tangleold/timemanager.go @@ -275,9 +275,9 @@ func (l LastBlock) Bytes() (marshaledLastConfirmedBlock []byte) { // String returns a human-readable version of the LastBlock. 
func (l LastBlock) String() string { return stringify.Struct("LastBlock", - stringify.StructField("BlockID", l.BlockID), - stringify.StructField("BlockTime", l.BlockTime), - stringify.StructField("UpdateTime", l.UpdateTime), + stringify.NewStructField("BlockID", l.BlockID), + stringify.NewStructField("BlockTime", l.BlockTime), + stringify.NewStructField("UpdateTime", l.UpdateTime), ) } diff --git a/packages/node/gossip/gossipproto/message.pb.go b/packages/node/gossip/gossipproto/message.pb.go index fcd0fe1459..b6dd188220 100644 --- a/packages/node/gossip/gossipproto/message.pb.go +++ b/packages/node/gossip/gossipproto/message.pb.go @@ -1,17 +1,16 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.27.1 -// protoc v3.18.0 -// source: block.proto +// protoc v3.12.4 +// source: packages/node/gossip/gossipproto/message.proto package gossipproto import ( - "reflect" - "sync" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoimpl" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) const ( @@ -36,7 +35,7 @@ type Packet struct { func (x *Packet) Reset() { *x = Packet{} if protoimpl.UnsafeEnabled { - mi := &file_block_proto_blkTypes[0] + mi := &file_packages_node_gossip_gossipproto_message_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -46,10 +45,10 @@ func (x *Packet) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Packet) ProtoBlock() {} +func (*Packet) ProtoMessage() {} func (x *Packet) ProtoReflect() protoreflect.Message { - mi := &file_block_proto_blkTypes[0] + mi := &file_packages_node_gossip_gossipproto_message_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -62,7 +61,7 @@ func (x *Packet) ProtoReflect() 
protoreflect.Message { // Deprecated: Use Packet.ProtoReflect.Descriptor instead. func (*Packet) Descriptor() ([]byte, []int) { - return file_block_proto_rawDescGZIP(), []int{0} + return file_packages_node_gossip_gossipproto_message_proto_rawDescGZIP(), []int{0} } func (m *Packet) GetBody() isPacket_Body { @@ -126,7 +125,7 @@ type Block struct { func (x *Block) Reset() { *x = Block{} if protoimpl.UnsafeEnabled { - mi := &file_block_proto_blkTypes[1] + mi := &file_packages_node_gossip_gossipproto_message_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -136,10 +135,10 @@ func (x *Block) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Block) ProtoBlock() {} +func (*Block) ProtoMessage() {} func (x *Block) ProtoReflect() protoreflect.Message { - mi := &file_block_proto_blkTypes[1] + mi := &file_packages_node_gossip_gossipproto_message_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -152,7 +151,7 @@ func (x *Block) ProtoReflect() protoreflect.Message { // Deprecated: Use Block.ProtoReflect.Descriptor instead. 
func (*Block) Descriptor() ([]byte, []int) { - return file_block_proto_rawDescGZIP(), []int{1} + return file_packages_node_gossip_gossipproto_message_proto_rawDescGZIP(), []int{1} } func (x *Block) GetData() []byte { @@ -173,7 +172,7 @@ type BlockRequest struct { func (x *BlockRequest) Reset() { *x = BlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_block_proto_blkTypes[2] + mi := &file_packages_node_gossip_gossipproto_message_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -183,10 +182,10 @@ func (x *BlockRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*BlockRequest) ProtoBlock() {} +func (*BlockRequest) ProtoMessage() {} func (x *BlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_block_proto_blkTypes[2] + mi := &file_packages_node_gossip_gossipproto_message_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -199,7 +198,7 @@ func (x *BlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockRequest.ProtoReflect.Descriptor instead. 
func (*BlockRequest) Descriptor() ([]byte, []int) { - return file_block_proto_rawDescGZIP(), []int{2} + return file_packages_node_gossip_gossipproto_message_proto_rawDescGZIP(), []int{2} } func (x *BlockRequest) GetId() []byte { @@ -218,7 +217,7 @@ type Negotiation struct { func (x *Negotiation) Reset() { *x = Negotiation{} if protoimpl.UnsafeEnabled { - mi := &file_block_proto_blkTypes[3] + mi := &file_packages_node_gossip_gossipproto_message_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -228,10 +227,10 @@ func (x *Negotiation) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Negotiation) ProtoBlock() {} +func (*Negotiation) ProtoMessage() {} func (x *Negotiation) ProtoReflect() protoreflect.Message { - mi := &file_block_proto_blkTypes[3] + mi := &file_packages_node_gossip_gossipproto_message_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -244,61 +243,59 @@ func (x *Negotiation) ProtoReflect() protoreflect.Message { // Deprecated: Use Negotiation.ProtoReflect.Descriptor instead. 
func (*Negotiation) Descriptor() ([]byte, []int) { - return file_block_proto_rawDescGZIP(), []int{3} -} - -var File_block_proto protoreflect.FileDescriptor - -var file_block_proto_rawDesc = []byte{ - 0x0a, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x0b, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc7, 0x01, 0x0a, - 0x06, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x30, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x73, 0x73, 0x69, - 0x70, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, - 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, - 0x52, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x3c, 0x0a, 0x0b, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, - 0x00, 0x52, 0x0b, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x06, - 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x22, 0x1d, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x20, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 
0x64, 0x22, 0x0d, 0x0a, 0x0b, 0x4e, 0x65, 0x67, 0x6f, 0x74, - 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x3d, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x6f, 0x74, 0x61, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x72, 0x2f, - 0x67, 0x6f, 0x73, 0x68, 0x69, 0x6d, 0x6d, 0x65, 0x72, 0x2f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x2f, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + return file_packages_node_gossip_gossipproto_message_proto_rawDescGZIP(), []int{3} +} + +var File_packages_node_gossip_gossipproto_message_proto protoreflect.FileDescriptor + +var file_packages_node_gossip_gossipproto_message_proto_rawDesc = []byte{ + 0x0a, 0x2e, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x73, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, + 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x2f, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0b, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x01, + 0x0a, 0x06, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x05, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x73, + 0x73, 0x69, 0x70, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0b, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x73, + 0x73, 0x69, 0x70, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x22, 0x1b, 0x0a, 0x05, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x1e, 0x0a, 0x0c, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x0d, 0x0a, 0x0b, 0x4e, 0x65, 0x67, 0x6f, + 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x3d, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x6f, 0x74, 0x61, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x72, + 0x2f, 0x67, 0x6f, 0x73, 0x68, 0x69, 0x6d, 0x6d, 0x65, 0x72, 0x2f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x2f, 0x67, 0x6f, 0x73, 0x73, 0x69, + 0x70, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - file_block_proto_rawDescOnce sync.Once - file_block_proto_rawDescData = file_block_proto_rawDesc + file_packages_node_gossip_gossipproto_message_proto_rawDescOnce sync.Once + file_packages_node_gossip_gossipproto_message_proto_rawDescData = file_packages_node_gossip_gossipproto_message_proto_rawDesc ) -func file_block_proto_rawDescGZIP() []byte { - file_block_proto_rawDescOnce.Do(func() { - file_block_proto_rawDescData = protoimpl.X.CompressGZIP(file_block_proto_rawDescData) +func file_packages_node_gossip_gossipproto_message_proto_rawDescGZIP() []byte { + file_packages_node_gossip_gossipproto_message_proto_rawDescOnce.Do(func() { + file_packages_node_gossip_gossipproto_message_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_packages_node_gossip_gossipproto_message_proto_rawDescData) }) - return file_block_proto_rawDescData + return file_packages_node_gossip_gossipproto_message_proto_rawDescData } -var ( - file_block_proto_blkTypes = make([]protoimpl.MessageInfo, 4) - file_block_proto_goTypes = []interface{}{ - (*Packet)(nil), // 0: gossipproto.Packet - (*Block)(nil), // 1: gossipproto.Block - (*BlockRequest)(nil), // 2: gossipproto.BlockRequest - (*Negotiation)(nil), // 3: gossipproto.Negotiation - } -) - -var file_block_proto_depIdxs = []int32{ +var file_packages_node_gossip_gossipproto_message_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_packages_node_gossip_gossipproto_message_proto_goTypes = []interface{}{ + (*Packet)(nil), // 0: gossipproto.Packet + (*Block)(nil), // 1: gossipproto.Block + (*BlockRequest)(nil), // 2: gossipproto.BlockRequest + (*Negotiation)(nil), // 3: gossipproto.Negotiation +} +var file_packages_node_gossip_gossipproto_message_proto_depIdxs = []int32{ 1, // 0: gossipproto.Packet.block:type_name -> gossipproto.Block 2, // 1: gossipproto.Packet.blockRequest:type_name -> gossipproto.BlockRequest 3, // 2: gossipproto.Packet.negotiation:type_name -> gossipproto.Negotiation @@ -309,13 +306,13 @@ var file_block_proto_depIdxs = []int32{ 0, // [0:3] is the sub-list for field type_name } -func init() { file_block_proto_init() } -func file_block_proto_init() { - if File_block_proto != nil { +func init() { file_packages_node_gossip_gossipproto_message_proto_init() } +func file_packages_node_gossip_gossipproto_message_proto_init() { + if File_packages_node_gossip_gossipproto_message_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_block_proto_blkTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_packages_node_gossip_gossipproto_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Packet); i { case 0: return &v.state @@ -327,7 +324,7 @@ func 
file_block_proto_init() { return nil } } - file_block_proto_blkTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_packages_node_gossip_gossipproto_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Block); i { case 0: return &v.state @@ -339,7 +336,7 @@ func file_block_proto_init() { return nil } } - file_block_proto_blkTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_packages_node_gossip_gossipproto_message_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlockRequest); i { case 0: return &v.state @@ -351,7 +348,7 @@ func file_block_proto_init() { return nil } } - file_block_proto_blkTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_packages_node_gossip_gossipproto_message_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Negotiation); i { case 0: return &v.state @@ -364,7 +361,7 @@ func file_block_proto_init() { } } } - file_block_proto_blkTypes[0].OneofWrappers = []interface{}{ + file_packages_node_gossip_gossipproto_message_proto_msgTypes[0].OneofWrappers = []interface{}{ (*Packet_Block)(nil), (*Packet_BlockRequest)(nil), (*Packet_Negotiation)(nil), @@ -373,18 +370,18 @@ func file_block_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_block_proto_rawDesc, + RawDescriptor: file_packages_node_gossip_gossipproto_message_proto_rawDesc, NumEnums: 0, NumMessages: 4, NumExtensions: 0, NumServices: 0, }, - GoTypes: file_block_proto_goTypes, - DependencyIndexes: file_block_proto_depIdxs, - MessageInfos: file_block_proto_blkTypes, + GoTypes: file_packages_node_gossip_gossipproto_message_proto_goTypes, + DependencyIndexes: file_packages_node_gossip_gossipproto_message_proto_depIdxs, + MessageInfos: file_packages_node_gossip_gossipproto_message_proto_msgTypes, }.Build() - File_block_proto = out.File - 
file_block_proto_rawDesc = nil - file_block_proto_goTypes = nil - file_block_proto_depIdxs = nil + File_packages_node_gossip_gossipproto_message_proto = out.File + file_packages_node_gossip_gossipproto_message_proto_rawDesc = nil + file_packages_node_gossip_gossipproto_message_proto_goTypes = nil + file_packages_node_gossip_gossipproto_message_proto_depIdxs = nil } diff --git a/packages/node/gossip/gossipproto/message.proto b/packages/node/gossip/gossipproto/message.proto index 884d4e65c2..f2137161c5 100644 --- a/packages/node/gossip/gossipproto/message.proto +++ b/packages/node/gossip/gossipproto/message.proto @@ -4,7 +4,7 @@ option go_package = "github.com/iotaledger/goshimmer/packages/gossip/gossipproto package gossipproto; -block Packet { +message Packet { oneof body { Block block = 1; BlockRequest blockRequest = 2; @@ -12,12 +12,12 @@ block Packet { } } -block Block { +message Block { bytes data = 1; } -block BlockRequest { +message BlockRequest { bytes id = 1; } -block Negotiation {} \ No newline at end of file +message Negotiation {} \ No newline at end of file diff --git a/packages/node/gossip/manager.go b/packages/node/gossip/manager.go index 99cf16bbdb..9465e17f32 100644 --- a/packages/node/gossip/manager.go +++ b/packages/node/gossip/manager.go @@ -113,7 +113,7 @@ func (m *Manager) Stop() { func (m *Manager) RequestBlock(blockID []byte, to ...identity.ID) { blkReq := &gp.BlockRequest{Id: blockID} packet := &gp.Packet{Body: &gp.Packet_BlockRequest{BlockRequest: blkReq}} - recipients := m.send(packet, to...) + recipients := m.p2pManager.Send(packet, protocolID, to...) if m.blocksRateLimiter != nil { for _, nbr := range recipients { // Increase the limit by 2 for every block request to make rate limiter more forgiving during node sync. 
@@ -127,22 +127,7 @@ func (m *Manager) RequestBlock(blockID []byte, to ...identity.ID) { func (m *Manager) SendBlock(blkData []byte, to ...identity.ID) { blk := &gp.Block{Data: blkData} packet := &gp.Packet{Body: &gp.Packet_Block{Block: blk}} - m.send(packet, to...) -} - -func (m *Manager) send(packet *gp.Packet, to ...identity.ID) []*p2p.Neighbor { - neighbors := m.p2pManager.GetNeighborsByID(to) - if len(neighbors) == 0 { - neighbors = m.p2pManager.AllNeighbors() - } - - for _, nbr := range neighbors { - if err := nbr.GetStream(protocolID).WritePacket(packet); err != nil { - m.log.Warnw("send error", "peer-id", nbr.ID(), "err", err) - nbr.Close() - } - } - return neighbors + m.p2pManager.Send(packet, protocolID, to...) } func (m *Manager) handlePacket(nbr *p2p.Neighbor, packet proto.Message) error { diff --git a/packages/node/p2p/manager.go b/packages/node/p2p/manager.go index 5786787ef0..5666797f7b 100644 --- a/packages/node/p2p/manager.go +++ b/packages/node/p2p/manager.go @@ -135,6 +135,7 @@ func (m *Manager) GetP2PHost() host.Host { func (m *Manager) AddOutbound(ctx context.Context, p *peer.Peer, group NeighborsGroup, connectOpts ...ConnectPeerOption, ) error { + m.log.Debugw("adding outbound neighbor", "peer", p.ID()) return m.addNeighbor(ctx, p, group, m.dialPeer, connectOpts) } @@ -142,6 +143,7 @@ func (m *Manager) AddOutbound(ctx context.Context, p *peer.Peer, group Neighbors func (m *Manager) AddInbound(ctx context.Context, p *peer.Peer, group NeighborsGroup, connectOpts ...ConnectPeerOption, ) error { + m.log.Debugw("adding inbound neighbor", "peer", p.ID()) return m.addNeighbor(ctx, p, group, m.acceptPeer, connectOpts) } @@ -166,15 +168,25 @@ func (m *Manager) DropNeighbor(id identity.ID, group NeighborsGroup) error { return nil } -// getNeighborWithGroup returns neighbor by ID and group. 
-func (m *Manager) getNeighborWithGroup(id identity.ID, group NeighborsGroup) (*Neighbor, error) { - m.neighborsMutex.RLock() - defer m.neighborsMutex.RUnlock() - nbr, ok := m.neighbors[id] - if !ok || nbr.Group != group { - return nil, ErrUnknownNeighbor +// Send sends a message with the specific protocol to a set of neighbors. +func (m *Manager) Send(packet proto.Message, protocolID protocol.ID, to ...identity.ID) []*Neighbor { + neighbors := m.GetNeighborsByID(to) + if len(neighbors) == 0 { + neighbors = m.AllNeighbors() } - return nbr, nil + + for _, nbr := range neighbors { + stream := nbr.GetStream(protocolID) + if stream == nil { + m.log.Warnw("send error, no stream for protocol", "peer-id", nbr.ID(), "protocol", protocolID) + continue + } + if err := stream.WritePacket(packet); err != nil { + m.log.Warnw("send error", "peer-id", nbr.ID(), "err", err) + nbr.Close() + } + } + return neighbors } // AllNeighbors returns all the neighbors that are currently connected. @@ -188,6 +200,16 @@ func (m *Manager) AllNeighbors() []*Neighbor { return result } +// AllNeighborsIDs returns all the ids of the neighbors that are currently connected. +func (m *Manager) AllNeighborsIDs() (ids []identity.ID) { + ids = make([]identity.ID, 0) + neighbors := m.AllNeighbors() + for _, nbr := range neighbors { + ids = append(ids, nbr.Peer.ID()) + } + return +} + // GetNeighborsByID returns all the neighbors that are currently connected corresponding to the supplied ids. func (m *Manager) GetNeighborsByID(ids []identity.ID) []*Neighbor { result := make([]*Neighbor, 0, len(ids)) @@ -205,6 +227,17 @@ func (m *Manager) GetNeighborsByID(ids []identity.ID) []*Neighbor { return result } +// getNeighborWithGroup returns neighbor by ID and group. 
+func (m *Manager) getNeighborWithGroup(id identity.ID, group NeighborsGroup) (*Neighbor, error) { + m.neighborsMutex.RLock() + defer m.neighborsMutex.RUnlock() + nbr, ok := m.neighbors[id] + if !ok || nbr.Group != group { + return nil, ErrUnknownNeighbor + } + return nbr, nil +} + func (m *Manager) addNeighbor(ctx context.Context, p *peer.Peer, group NeighborsGroup, connectorFunc func(context.Context, *peer.Peer, []ConnectPeerOption) (map[protocol.ID]*PacketsStream, error), connectOpts []ConnectPeerOption, diff --git a/packages/node/p2p/neighbor.go b/packages/node/p2p/neighbor.go index 23320e3fe1..ad757ea7c1 100644 --- a/packages/node/p2p/neighbor.go +++ b/packages/node/p2p/neighbor.go @@ -139,9 +139,10 @@ func (n *Neighbor) disconnect() (err error) { if streamErr := stream.Close(); streamErr != nil { err = errors.WithStack(streamErr) } - n.Log.Info("Connection closed") - n.Events.Disconnected.Trigger(&NeighborDisconnectedEvent{}) + n.Log.Infow("Stream closed", "protocol", stream.Protocol()) } + n.Log.Info("Connection closed") + n.Events.Disconnected.Trigger(&NeighborDisconnectedEvent{}) }) return err } diff --git a/packages/node/p2p/stream.go b/packages/node/p2p/stream.go index 86b007f26e..a383696e8f 100644 --- a/packages/node/p2p/stream.go +++ b/packages/node/p2p/stream.go @@ -96,23 +96,24 @@ func (m *Manager) acceptPeer(ctx context.Context, p *peer.Peer, opts []ConnectPe return nil, ErrNoP2P } - handleInboundStream := func(protocolID protocol.ID) (*PacketsStream, error) { - conf := buildConnectPeerConfig(opts) - if conf.useDefaultTimeout { + handleInboundStream := func(ctx context.Context, protocolID protocol.ID) (*PacketsStream, error) { + if buildConnectPeerConfig(opts).useDefaultTimeout { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, defaultConnectionTimeout) defer cancel() } - am, err := newAcceptMatcher(p) + am, err := m.newAcceptMatcher(p, protocolID) if err != nil { return nil, errors.WithStack(err) } - if ok := 
m.setAcceptMatcher(am); !ok { + if am == nil { return nil, errors.WithStack(ErrDuplicateAccept) } - defer m.removeAcceptMatcher(am) + defer m.removeAcceptMatcher(am, protocolID) + + m.log.Debugw("waiting for incoming stream", "id", am.Peer.ID(), "proto", protocolID) select { - case ps := <-am.StreamCh: + case ps := <-am.StreamCh[protocolID]: if ps.Protocol() != protocolID { return nil, fmt.Errorf("accepted stream has wrong protocol: %s != %s", ps.Protocol(), protocolID) } @@ -120,7 +121,7 @@ func (m *Manager) acceptPeer(ctx context.Context, p *peer.Peer, opts []ConnectPe case <-ctx.Done(): err := ctx.Err() if errors.Is(err, context.DeadlineExceeded) { - m.log.Debugw("accept timeout", "id", am.Peer.ID()) + m.log.Debugw("accept timeout", "id", am.Peer.ID(), "proto", protocolID) return nil, errors.WithStack(ErrTimeout) } m.log.Debugw("context error", "id", am.Peer.ID(), "err", err) @@ -129,15 +130,15 @@ func (m *Manager) acceptPeer(ctx context.Context, p *peer.Peer, opts []ConnectPe } var acceptWG sync.WaitGroup - streams := make(map[protocol.ID]*PacketsStream) + streamsChan := make(chan *PacketsStream, len(m.registeredProtocols)) for protocolID := range m.registeredProtocols { acceptWG.Add(1) go func(protocolID protocol.ID) { defer acceptWG.Done() - stream, err := handleInboundStream(protocolID) + stream, err := handleInboundStream(ctx, protocolID) if err != nil { m.log.Errorf( - "accept %s / %s proto %s failed: %w", + "accept %s / %s proto %s failed: %s", net.JoinHostPort(p.IP().String(), strconv.Itoa(p2pEndpoint.Port())), p.ID(), protocolID, @@ -150,10 +151,16 @@ func (m *Manager) acceptPeer(ctx context.Context, p *peer.Peer, opts []ConnectPe "addr", stream.Conn().RemoteMultiaddr(), "proto", protocolID, ) - streams[protocolID] = stream + streamsChan <- stream }(protocolID) } acceptWG.Wait() + close(streamsChan) + + streams := make(map[protocol.ID]*PacketsStream) + for stream := range streamsChan { + streams[stream.Protocol()] = stream + } if len(streams) == 0 { 
return nil, fmt.Errorf("no streams accepted from peer %s", p.ID()) @@ -188,23 +195,26 @@ func (m *Manager) handleStream(stream network.Stream) { protocolHandler, registered := m.registeredProtocols[protocolID] if !registered { m.log.Errorf("cannot accept stream: protocol %s is not registered", protocolID) - m.CloseStream(stream) + m.closeStream(stream) return } ps := NewPacketsStream(stream, protocolHandler.PacketFactory) if err := protocolHandler.NegotiationReceive(ps); err != nil { m.log.Errorw("failed to receive negotiation message", "proto", protocolID, "err", err) - m.CloseStream(stream) + m.closeStream(stream) return } - am := m.MatchNewStream(stream) + am := m.matchNewStream(stream) + if am != nil { - am.StreamCh <- ps + m.log.Debugw("incoming stream matched", "id", am.Peer.ID(), "proto", protocolID) + am.StreamCh[protocolID] <- ps } else { // close the connection if not matched m.log.Debugw("unexpected connection", "addr", stream.Conn().RemoteMultiaddr(), - "id", stream.Conn().RemotePeer()) - m.CloseStream(stream) + "id", stream.Conn().RemotePeer(), "proto", protocolID) + m.closeStream(stream) + stream.Conn().Close() } } @@ -212,48 +222,60 @@ func (m *Manager) handleStream(stream network.Stream) { type AcceptMatcher struct { Peer *peer.Peer // connecting peer Libp2pID libp2ppeer.ID - StreamCh chan *PacketsStream + StreamCh map[protocol.ID]chan *PacketsStream } -func newAcceptMatcher(p *peer.Peer) (*AcceptMatcher, error) { +func (m *Manager) newAcceptMatcher(p *peer.Peer, protocolID protocol.ID) (*AcceptMatcher, error) { + m.acceptMutex.Lock() + defer m.acceptMutex.Unlock() + libp2pID, err := libp2putil.ToLibp2pPeerID(p) if err != nil { return nil, errors.WithStack(err) } - return &AcceptMatcher{ + + acceptMatcher, acceptExists := m.acceptMap[libp2pID] + if acceptExists { + if _, streamChanExists := acceptMatcher.StreamCh[protocolID]; streamChanExists { + return nil, nil + } + acceptMatcher.StreamCh[protocolID] = make(chan *PacketsStream) + return 
acceptMatcher, nil + } + + am := &AcceptMatcher{ Peer: p, Libp2pID: libp2pID, - StreamCh: make(chan *PacketsStream, 1), - }, nil -} - -func (m *Manager) setAcceptMatcher(am *AcceptMatcher) bool { - m.acceptMutex.Lock() - defer m.acceptMutex.Unlock() - _, exists := m.acceptMap[am.Libp2pID] - if exists { - return false + StreamCh: make(map[protocol.ID]chan *PacketsStream), } - m.acceptMap[am.Libp2pID] = am - return true + + am.StreamCh[protocolID] = make(chan *PacketsStream) + + m.acceptMap[libp2pID] = am + + return am, nil } -func (m *Manager) removeAcceptMatcher(am *AcceptMatcher) { +func (m *Manager) removeAcceptMatcher(am *AcceptMatcher, protocolID protocol.ID) { m.acceptMutex.Lock() defer m.acceptMutex.Unlock() - delete(m.acceptMap, am.Libp2pID) + + close(m.acceptMap[am.Libp2pID].StreamCh[protocolID]) + delete(m.acceptMap[am.Libp2pID].StreamCh, protocolID) + + if len(m.acceptMap[am.Libp2pID].StreamCh) == 0 { + delete(m.acceptMap, am.Libp2pID) + } } -// MatchNewStream matches a new stream with a peer. -func (m *Manager) MatchNewStream(stream network.Stream) *AcceptMatcher { +func (m *Manager) matchNewStream(stream network.Stream) *AcceptMatcher { m.acceptMutex.RLock() defer m.acceptMutex.RUnlock() am := m.acceptMap[stream.Conn().RemotePeer()] return am } -// CloseStream closes a stream. -func (m *Manager) CloseStream(s network.Stream) { +func (m *Manager) closeStream(s network.Stream) { if err := s.Close(); err != nil { m.log.Warnw("close error", "err", err) } diff --git a/packages/node/shutdown/order.go b/packages/node/shutdown/order.go index f7d9315b8a..f44d752820 100644 --- a/packages/node/shutdown/order.go +++ b/packages/node/shutdown/order.go @@ -21,6 +21,8 @@ const ( PriorityPrometheus // PriorityMetrics defines the shutdown priority for metrics server. PriorityMetrics + // PriorityWarpsync defines the shutdown priority for warpsync. + PriorityWarpsync // PriorityGossip defines the shutdown priority for gossip. 
PriorityGossip // PriorityP2P defines the shutdown priority for p2p. diff --git a/packages/node/warpsync/manager.go b/packages/node/warpsync/manager.go new file mode 100644 index 0000000000..8cca54ae7f --- /dev/null +++ b/packages/node/warpsync/manager.go @@ -0,0 +1,159 @@ +package warpsync + +import ( + "context" + "sync" + + "github.com/cockroachdb/errors" + "github.com/iotaledger/goshimmer/packages/core/epoch" + "github.com/iotaledger/goshimmer/packages/core/tangleold" + "github.com/iotaledger/goshimmer/packages/node/p2p" + "github.com/iotaledger/hive.go/core/autopeering/peer" + "github.com/iotaledger/hive.go/core/generics/event" + "github.com/iotaledger/hive.go/core/generics/options" + "github.com/iotaledger/hive.go/core/logger" + "github.com/iotaledger/hive.go/core/typeutils" +) + +const ( + protocolID = "warpsync/0.0.1" +) + +const minimumWindowSize = 10 + +// LoadBlockFunc defines a function that returns the block for the given id. +type LoadBlockFunc func(blockId tangleold.BlockID) (*tangleold.Block, error) + +// ProcessBlockFunc defines a function that processes block's bytes from a given peer. +type ProcessBlockFunc func(blk *tangleold.Block, peer *peer.Peer) + +// The Manager handles the connected neighbors. 
+type Manager struct { + p2pManager *p2p.Manager + + log *logger.Logger + + active typeutils.AtomicBool + stopped typeutils.AtomicBool + + blockLoaderFunc LoadBlockFunc + blockProcessorFunc ProcessBlockFunc + + concurrency int + blockBatchSize int + + validationInProgress bool + validationLock sync.RWMutex + commitmentsChan chan *neighborCommitment + commitmentsStopChan chan struct{} + + syncingInProgress bool + syncingLock sync.RWMutex + epochsChannels map[epoch.Index]*epochChannels + + successfulSyncEpoch epoch.Index + + sync.RWMutex +} + +type epochChannels struct { + sync.RWMutex + startChan chan *epochSyncStart + blockChan chan *epochSyncBlock + endChan chan *epochSyncEnd + stopChan chan struct{} + active bool +} + +// NewManager creates a new Manager. +func NewManager(p2pManager *p2p.Manager, blockLoaderFunc LoadBlockFunc, blockProcessorFunc ProcessBlockFunc, log *logger.Logger, opts ...options.Option[Manager]) *Manager { + m := &Manager{ + p2pManager: p2pManager, + log: log, + blockLoaderFunc: blockLoaderFunc, + blockProcessorFunc: blockProcessorFunc, + } + + m.p2pManager.RegisterProtocol(protocolID, &p2p.ProtocolHandler{ + PacketFactory: warpsyncPacketFactory, + NegotiationSend: sendNegotiationMessage, + NegotiationReceive: receiveNegotiationMessage, + PacketHandler: m.handlePacket, + }) + + options.Apply(m, opts) + + return m +} + +// WithConcurrency allows to set how many epochs can be requested at once. +func WithConcurrency(concurrency int) options.Option[Manager] { + return func(m *Manager) { + m.concurrency = concurrency + } +} + +// WithBlockBatchSize allows to set the size of the block batch returned as part of epoch blocks response. 
+func WithBlockBatchSize(blockBatchSize int) options.Option[Manager] { + return func(m *Manager) { + m.blockBatchSize = blockBatchSize + } +} + +func (m *Manager) WarpRange(ctx context.Context, start, end epoch.Index, startEC epoch.EC, endPrevEC epoch.EC) (err error) { + if m.IsStopped() { + return errors.Errorf("warpsync manager is stopped") + } + + if m.active.IsSet() { + m.log.Debugf("WarpRange: already syncing or validating") + return nil + } + + m.Lock() + defer m.Unlock() + + // Skip warpsyncing if the requested range overlaps with a previous run. + if end-m.successfulSyncEpoch < minimumWindowSize { + m.log.Debugf("WarpRange: already synced to %d", m.successfulSyncEpoch) + return nil + } + + m.active.Set() + defer m.active.UnSet() + + m.log.Infof("warpsyncing range %d-%d on chain %s -> %s", start, end, startEC.Base58(), endPrevEC.Base58()) + + ecChain, validPeers, validateErr := m.validateBackwards(ctx, start, end, startEC, endPrevEC) + if validateErr != nil { + return errors.Wrapf(validateErr, "failed to validate range %d-%d", start, end) + } + lowestProcessedEpoch, syncRangeErr := m.syncRange(ctx, start, end, startEC, ecChain, validPeers) + if syncRangeErr != nil { + return errors.Wrapf(syncRangeErr, "failed to sync range %d-%d with peers %s", start, end, validPeers) + } + + m.log.Infof("range %d-%d synced", start, lowestProcessedEpoch) + + m.successfulSyncEpoch = lowestProcessedEpoch + 1 + + return nil +} + +// IsStopped returns true if the manager is stopped. +func (m *Manager) IsStopped() bool { + return m.stopped.IsSet() +} + +// Stop stops the manager and closes all established connections. 
+func (m *Manager) Stop() { + m.stopped.Set() + m.p2pManager.UnregisterProtocol(protocolID) +} + +func submitTask[P any](packetProcessor func(packet P, nbr *p2p.Neighbor), packet P, nbr *p2p.Neighbor) error { + if added := event.Loop.TrySubmit(func() { packetProcessor(packet, nbr) }); !added { + return errors.Errorf("WorkerPool full: packet block discarded") + } + return nil +} diff --git a/packages/node/warpsync/proto.go b/packages/node/warpsync/proto.go new file mode 100644 index 0000000000..6de11d4c36 --- /dev/null +++ b/packages/node/warpsync/proto.go @@ -0,0 +1,130 @@ +package warpsync + +import ( + "github.com/cockroachdb/errors" + "github.com/iotaledger/goshimmer/packages/core/epoch" + "github.com/iotaledger/goshimmer/packages/core/tangleold" + "github.com/iotaledger/goshimmer/packages/node/p2p" + wp "github.com/iotaledger/goshimmer/packages/node/warpsync/warpsyncproto" + "github.com/iotaledger/hive.go/core/identity" + "google.golang.org/protobuf/proto" +) + +func (m *Manager) handlePacket(nbr *p2p.Neighbor, packet proto.Message) error { + wpPacket := packet.(*wp.Packet) + switch packetBody := wpPacket.GetBody().(type) { + case *wp.Packet_EpochBlocksRequest: + return submitTask(m.processEpochBlocksRequestPacket, packetBody, nbr) + case *wp.Packet_EpochBlocksStart: + return submitTask(m.processEpochBlocksStartPacket, packetBody, nbr) + case *wp.Packet_EpochBlocksBatch: + return submitTask(m.processEpochBlocksBatchPacket, packetBody, nbr) + case *wp.Packet_EpochBlocksEnd: + return submitTask(m.processEpochBlocksEndPacket, packetBody, nbr) + case *wp.Packet_EpochCommitmentRequest: + return submitTask(m.processEpochCommittmentRequestPacket, packetBody, nbr) + case *wp.Packet_EpochCommitment: + return submitTask(m.processEpochCommittmentPacket, packetBody, nbr) + default: + return errors.Errorf("unsupported packet; packet=%+v, packetBody=%T-%+v", wpPacket, packetBody, packetBody) + } +} + +func (m *Manager) requestEpochCommittment(ei epoch.Index, to 
...identity.ID) { + committmentReq := &wp.EpochCommittmentRequest{EI: int64(ei)} + packet := &wp.Packet{Body: &wp.Packet_EpochCommitmentRequest{EpochCommitmentRequest: committmentReq}} + m.p2pManager.Send(packet, protocolID, to...) + m.log.Debugw("sent epoch committment request", "EI", ei) +} + +func (m *Manager) sendEpochCommittmentMessage(ei epoch.Index, ecr epoch.ECR, prevEC epoch.EC, to ...identity.ID) { + committmentRes := &wp.EpochCommittment{ + EI: int64(ei), + ECR: ecr.Bytes(), + PrevEC: prevEC.Bytes(), + } + packet := &wp.Packet{Body: &wp.Packet_EpochCommitment{EpochCommitment: committmentRes}} + + m.p2pManager.Send(packet, protocolID, to...) +} + +func (m *Manager) requestEpochBlocks(ei epoch.Index, ec epoch.EC, to ...identity.ID) { + epochBlocksReq := &wp.EpochBlocksRequest{ + EI: int64(ei), + EC: ec.Bytes(), + } + packet := &wp.Packet{Body: &wp.Packet_EpochBlocksRequest{EpochBlocksRequest: epochBlocksReq}} + m.p2pManager.Send(packet, protocolID, to...) + + m.log.Debugw("sent epoch blocks request", "EI", ei, "EC", ec.Base58()) +} + +func (m *Manager) sendEpochStarter(ei epoch.Index, ec epoch.EC, blocksCount int, to ...identity.ID) { + epochStartRes := &wp.EpochBlocksStart{ + EI: int64(ei), + EC: ec.Bytes(), + BlocksCount: int64(blocksCount), + } + packet := &wp.Packet{Body: &wp.Packet_EpochBlocksStart{EpochBlocksStart: epochStartRes}} + + m.p2pManager.Send(packet, protocolID, to...) 
+} + +func (m *Manager) sendBlocksBatch(ei epoch.Index, ec epoch.EC, blocks []*tangleold.Block, to ...identity.ID) { + blocksBytes := make([][]byte, len(blocks)) + + for i, block := range blocks { + blockBytes, err := block.Bytes() + if err != nil { + m.log.Errorf("failed to serialize block %s: %s", block.ID(), err) + return + } + blocksBytes[i] = blockBytes + } + + blocksBatchRes := &wp.EpochBlocksBatch{ + EI: int64(ei), + EC: ec.Bytes(), + Blocks: blocksBytes, + } + packet := &wp.Packet{Body: &wp.Packet_EpochBlocksBatch{EpochBlocksBatch: blocksBatchRes}} + + m.p2pManager.Send(packet, protocolID, to...) +} + +func (m *Manager) sendEpochEnd(ei epoch.Index, ec epoch.EC, roots *epoch.CommitmentRoots, to ...identity.ID) { + epochBlocksEnd := &wp.EpochBlocksEnd{ + EI: int64(ei), + EC: ec.Bytes(), + StateMutationRoot: roots.StateMutationRoot.Bytes(), + StateRoot: roots.StateRoot.Bytes(), + ManaRoot: roots.ManaRoot.Bytes(), + } + packet := &wp.Packet{Body: &wp.Packet_EpochBlocksEnd{EpochBlocksEnd: epochBlocksEnd}} + + m.p2pManager.Send(packet, protocolID, to...) 
+} + +func warpsyncPacketFactory() proto.Message { + return &wp.Packet{} +} + +func sendNegotiationMessage(ps *p2p.PacketsStream) error { + packet := &wp.Packet{Body: &wp.Packet_Negotiation{Negotiation: &wp.Negotiation{}}} + return errors.WithStack(ps.WritePacket(packet)) +} + +func receiveNegotiationMessage(ps *p2p.PacketsStream) (err error) { + packet := &wp.Packet{} + if err := ps.ReadPacket(packet); err != nil { + return errors.WithStack(err) + } + packetBody := packet.GetBody() + if _, ok := packetBody.(*wp.Packet_Negotiation); !ok { + return errors.Newf( + "received packet isn't the negotiation packet; packet=%+v, packetBody=%T-%+v", + packet, packetBody, packetBody, + ) + } + return nil +} diff --git a/packages/node/warpsync/syncing.go b/packages/node/warpsync/syncing.go new file mode 100644 index 0000000000..3cd1217250 --- /dev/null +++ b/packages/node/warpsync/syncing.go @@ -0,0 +1,357 @@ +package warpsync + +import ( + "context" + + "github.com/celestiaorg/smt" + "github.com/cockroachdb/errors" + "github.com/iotaledger/hive.go/core/autopeering/peer" + "github.com/iotaledger/hive.go/core/generics/dataflow" + "github.com/iotaledger/hive.go/core/generics/lo" + "github.com/iotaledger/hive.go/core/generics/set" + "github.com/iotaledger/hive.go/core/identity" + "github.com/iotaledger/hive.go/core/types" + "golang.org/x/crypto/blake2b" + "golang.org/x/sync/errgroup" + + "github.com/iotaledger/goshimmer/packages/core/epoch" + "github.com/iotaledger/goshimmer/packages/core/tangleold" + "github.com/iotaledger/goshimmer/packages/node/database" + "github.com/iotaledger/goshimmer/packages/node/p2p" + wp "github.com/iotaledger/goshimmer/packages/node/warpsync/warpsyncproto" + "github.com/iotaledger/goshimmer/plugins/epochstorage" +) + +type epochSyncStart struct { + ei epoch.Index + ec epoch.EC + blocksCount int64 +} + +type epochSyncBlock struct { + ei epoch.Index + ec epoch.EC + block *tangleold.Block + peer *peer.Peer +} + +type epochSyncEnd struct { + ei 
epoch.Index + ec epoch.EC + stateMutationRoot epoch.MerkleRoot + stateRoot epoch.MerkleRoot + manaRoot epoch.MerkleRoot +} + +type blockReceived struct { + block *tangleold.Block + peer *peer.Peer +} + +func (m *Manager) syncRange(ctx context.Context, start, end epoch.Index, startEC epoch.EC, ecChain map[epoch.Index]epoch.EC, validPeers *set.AdvancedSet[identity.ID]) (completedEpoch epoch.Index, err error) { + startRange := start + 1 + endRange := end - 1 + + m.startSyncing(startRange, endRange) + defer m.endSyncing() + + eg, errCtx := errgroup.WithContext(ctx) + eg.SetLimit(m.concurrency) + + epochProcessedChan := make(chan epoch.Index) + discardedPeers := set.NewAdvancedSet[identity.ID]() + + workerFunc := m.syncEpochFunc(errCtx, eg, validPeers, discardedPeers, ecChain, epochProcessedChan) + completedEpoch = m.queueSlidingEpochs(errCtx, startRange, endRange, workerFunc, epochProcessedChan) + + if err := eg.Wait(); err != nil { + return completedEpoch, errors.Wrapf(err, "sync failed for range %d-%d", start, end) + } + return completedEpoch, nil +} + +func (m *Manager) syncEpochFunc(errCtx context.Context, eg *errgroup.Group, validPeers *set.AdvancedSet[identity.ID], discardedPeers *set.AdvancedSet[identity.ID], ecChain map[epoch.Index]epoch.EC, epochProcessedChan chan epoch.Index) func(targetEpoch epoch.Index) { + return func(targetEpoch epoch.Index) { + eg.Go(func() error { + success := false + for it := validPeers.Iterator(); it.HasNext() && !success; { + peerID := it.Next() + if discardedPeers.Has(peerID) { + m.log.Debugw("skipping discarded peer", "peer", peerID) + continue + } + + db, _ := database.NewMemDB() + tangleTree := smt.NewSparseMerkleTree(db.NewStore(), db.NewStore(), lo.PanicOnErr(blake2b.New256(nil))) + + epochChannels := m.startEpochSyncing(targetEpoch) + epochChannels.RLock() + + m.requestEpochBlocks(targetEpoch, ecChain[targetEpoch], peerID) + + dataflow.New( + m.epochStartCommand, + m.epochBlockCommand, + m.epochEndCommand, + 
m.epochVerifyCommand, + m.epochProcessBlocksCommand, + ).WithTerminationCallback(func(params *syncingFlowParams) { + params.epochChannels.RUnlock() + m.endEpochSyncing(params.targetEpoch) + }).WithSuccessCallback(func(params *syncingFlowParams) { + success = true + select { + case <-params.ctx.Done(): + return + case epochProcessedChan <- params.targetEpoch: + } + m.log.Infow("synced epoch", "epoch", params.targetEpoch, "peer", params.peerID) + }).WithErrorCallback(func(flowErr error, params *syncingFlowParams) { + discardedPeers.Add(params.peerID) + m.log.Warnf("error while syncing epoch %d from peer %s: %s", params.targetEpoch, params.peerID, flowErr) + }).Run(&syncingFlowParams{ + ctx: errCtx, + targetEpoch: targetEpoch, + targetEC: ecChain[targetEpoch], + targetPrevEC: ecChain[targetEpoch-1], + epochChannels: epochChannels, + peerID: peerID, + tangleTree: tangleTree, + epochBlocks: make(map[tangleold.BlockID]*tangleold.Block), + }) + } + + if !success { + return errors.Errorf("unable to sync epoch %d", targetEpoch) + } + + return nil + }) + } +} + +func (m *Manager) queueSlidingEpochs(errCtx context.Context, startRange, endRange epoch.Index, workerFunc func(epoch.Index), epochProcessedChan chan epoch.Index) (completedEpoch epoch.Index) { + processedEpochs := make(map[epoch.Index]types.Empty) + for ei := startRange; ei < startRange+epoch.Index(m.concurrency) && ei <= endRange; ei++ { + workerFunc(ei) + } + + windowStart := startRange + for { + select { + case processedEpoch := <-epochProcessedChan: + processedEpochs[processedEpoch] = types.Void + for { + if _, processed := processedEpochs[windowStart]; processed { + completedEpoch = windowStart + if completedEpoch == endRange { + return + } + windowEnd := windowStart + epoch.Index(m.concurrency) + if windowEnd <= endRange { + workerFunc(windowEnd) + } + windowStart++ + } else { + break + } + } + case <-errCtx.Done(): + return + } + } +} + +func (m *Manager) startSyncing(startRange, endRange epoch.Index) { + 
m.syncingLock.Lock() + defer m.syncingLock.Unlock() + + m.syncingInProgress = true + m.epochsChannels = make(map[epoch.Index]*epochChannels) + for ei := startRange; ei <= endRange; ei++ { + m.epochsChannels[ei] = &epochChannels{} + } +} + +func (m *Manager) endSyncing() { + m.syncingLock.Lock() + defer m.syncingLock.Unlock() + + m.syncingInProgress = false + m.epochsChannels = nil +} + +func (m *Manager) startEpochSyncing(ei epoch.Index) (epochChannels *epochChannels) { + m.syncingLock.Lock() + defer m.syncingLock.Unlock() + + epochChannels = m.epochsChannels[ei] + + epochChannels.Lock() + defer epochChannels.Unlock() + + epochChannels.startChan = make(chan *epochSyncStart, 1) + epochChannels.blockChan = make(chan *epochSyncBlock, 1) + epochChannels.endChan = make(chan *epochSyncEnd, 1) + epochChannels.stopChan = make(chan struct{}) + epochChannels.active = true + + return +} + +func (m *Manager) endEpochSyncing(ei epoch.Index) { + m.syncingLock.Lock() + defer m.syncingLock.Unlock() + + epochChannels := m.epochsChannels[ei] + + epochChannels.active = false + close(epochChannels.stopChan) + epochChannels.Lock() + defer epochChannels.Unlock() + + close(epochChannels.startChan) + close(epochChannels.blockChan) + close(epochChannels.endChan) +} + +func (m *Manager) processEpochBlocksRequestPacket(packetEpochRequest *wp.Packet_EpochBlocksRequest, nbr *p2p.Neighbor) { + ei := epoch.Index(packetEpochRequest.EpochBlocksRequest.GetEI()) + ec := epoch.NewMerkleRoot(packetEpochRequest.EpochBlocksRequest.GetEC()) + + m.log.Debugw("received epoch blocks request", "peer", nbr.Peer.ID(), "EI", ei, "EC", ec) + + ecRecord, exists := epochstorage.GetEpochCommittment(ei) + if !exists || ec != ecRecord.ComputeEC() { + m.log.Debugw("epoch blocks request rejected: unknown epoch or mismatching EC", "peer", nbr.Peer.ID(), "EI", ei, "EC", ec) + return + } + blockIDs := epochstorage.GetEpochBlockIDs(ei) + blocksCount := len(blockIDs) + + // Send epoch starter. 
+ m.sendEpochStarter(ei, ec, blocksCount, nbr.ID()) + m.log.Debugw("sent epoch start", "peer", nbr.Peer.ID(), "EI", ei, "blocksCount", blocksCount) + + // Send epoch's blocks in batches. + for batchNum := 0; batchNum <= len(blockIDs)/m.blockBatchSize; batchNum++ { + blocks := make([]*tangleold.Block, 0) + for i := batchNum * m.blockBatchSize; i < len(blockIDs) && i < (batchNum+1)*m.blockBatchSize; i++ { + block, err := m.blockLoaderFunc(blockIDs[i]) + if err != nil { + m.log.Errorf("failed to load block %s: %s", blockIDs[i], err) + return + } + blocks = append(blocks, block) + } + + m.sendBlocksBatch(ei, ec, blocks, nbr.ID()) + m.log.Debugw("sent epoch blocks batch", "peer", nbr.ID(), "EI", ei, "blocksLen", len(blocks)) + } + + // Send epoch terminator. + m.sendEpochEnd(ei, ec, ecRecord.Roots(), nbr.ID()) + m.log.Debugw("sent epoch blocks end", "peer", nbr.ID(), "EI", ei, "EC", ec.Base58()) +} + +func (m *Manager) processEpochBlocksStartPacket(packetEpochBlocksStart *wp.Packet_EpochBlocksStart, nbr *p2p.Neighbor) { + epochBlocksStart := packetEpochBlocksStart.EpochBlocksStart + ei := epoch.Index(epochBlocksStart.GetEI()) + + epochChannels := m.getEpochChannels(ei) + if epochChannels == nil { + return + } + + epochChannels.RLock() + defer epochChannels.RUnlock() + + if !epochChannels.active { + return + } + + m.log.Debugw("received epoch blocks start", "peer", nbr.Peer.ID(), "EI", ei, "blocksCount", epochBlocksStart.GetBlocksCount()) + + epochChannels.startChan <- &epochSyncStart{ + ei: ei, + ec: epoch.NewMerkleRoot(epochBlocksStart.GetEC()), + blocksCount: epochBlocksStart.GetBlocksCount(), + } +} + +func (m *Manager) processEpochBlocksBatchPacket(packetEpochBlocksBatch *wp.Packet_EpochBlocksBatch, nbr *p2p.Neighbor) { + epochBlocksBatch := packetEpochBlocksBatch.EpochBlocksBatch + ei := epoch.Index(epochBlocksBatch.GetEI()) + + epochChannels := m.getEpochChannels(ei) + if epochChannels == nil { + return + } + + epochChannels.RLock() + defer epochChannels.RUnlock() 
+ + if !epochChannels.active { + return + } + + blocksBytes := epochBlocksBatch.GetBlocks() + m.log.Debugw("received epoch blocks", "peer", nbr.Peer.ID(), "EI", ei, "blocksLen", len(blocksBytes)) + + for _, blockBytes := range blocksBytes { + block := new(tangleold.Block) + if err := block.FromBytes(blockBytes); err != nil { + m.log.Errorw("failed to deserialize block", "peer", nbr.Peer.ID(), "err", err) + return + } + + select { + case <-epochChannels.stopChan: + return + case epochChannels.blockChan <- &epochSyncBlock{ + ei: ei, + ec: epoch.NewMerkleRoot(epochBlocksBatch.GetEC()), + peer: nbr.Peer, + block: block, + }: + } + } +} + +func (m *Manager) processEpochBlocksEndPacket(packetEpochBlocksEnd *wp.Packet_EpochBlocksEnd, nbr *p2p.Neighbor) { + epochBlocksBatch := packetEpochBlocksEnd.EpochBlocksEnd + ei := epoch.Index(epochBlocksBatch.GetEI()) + + epochChannels := m.getEpochChannels(ei) + if epochChannels == nil { + return + } + + epochChannels.RLock() + defer epochChannels.RUnlock() + + if !epochChannels.active { + return + } + + m.log.Debugw("received epoch blocks end", "peer", nbr.Peer.ID(), "EI", ei) + + epochChannels.endChan <- &epochSyncEnd{ + ei: ei, + ec: epoch.NewMerkleRoot(packetEpochBlocksEnd.EpochBlocksEnd.GetEC()), + stateMutationRoot: epoch.NewMerkleRoot(packetEpochBlocksEnd.EpochBlocksEnd.GetStateMutationRoot()), + stateRoot: epoch.NewMerkleRoot(packetEpochBlocksEnd.EpochBlocksEnd.GetStateRoot()), + manaRoot: epoch.NewMerkleRoot(packetEpochBlocksEnd.EpochBlocksEnd.GetManaRoot()), + } +} + +func (m *Manager) getEpochChannels(ei epoch.Index) *epochChannels { + m.syncingLock.RLock() + defer m.syncingLock.RUnlock() + + if !m.syncingInProgress { + return nil + } + + return m.epochsChannels[ei] +} diff --git a/packages/node/warpsync/syncing_dataflow.go b/packages/node/warpsync/syncing_dataflow.go new file mode 100644 index 0000000000..5ce2482ba3 --- /dev/null +++ b/packages/node/warpsync/syncing_dataflow.go @@ -0,0 +1,140 @@ +package warpsync + 
+import ( + "context" + + "github.com/celestiaorg/smt" + "github.com/cockroachdb/errors" + "github.com/iotaledger/goshimmer/packages/core/epoch" + "github.com/iotaledger/goshimmer/packages/core/tangleold" + "github.com/iotaledger/hive.go/core/generics/dataflow" + "github.com/iotaledger/hive.go/core/identity" +) + +// syncingFlowParams is a container for parameters to be used in the warpsyncing of an epoch. +type syncingFlowParams struct { + ctx context.Context + targetEpoch epoch.Index + targetEC epoch.EC + targetPrevEC epoch.EC + epochChannels *epochChannels + peerID identity.ID + tangleTree *smt.SparseMerkleTree + epochBlocksLeft int64 + epochBlocks map[tangleold.BlockID]*tangleold.Block + stateMutationRoot epoch.MerkleRoot + stateRoot epoch.MerkleRoot + manaRoot epoch.MerkleRoot +} + +func (m *Manager) epochStartCommand(params *syncingFlowParams, next dataflow.Next[*syncingFlowParams]) (err error) { + select { + case epochStart, ok := <-params.epochChannels.startChan: + if !ok { + return nil + } + if valid, err := isOnTargetChain(epochStart.ei, epochStart.ec, params); !valid { + return errors.Wrap(err, "received invalid epoch start") + } + + params.epochBlocksLeft = epochStart.blocksCount + m.log.Debugw("read epoch block count", "EI", epochStart.ei, "blocksCount", params.epochBlocksLeft) + case <-params.ctx.Done(): + return errors.Errorf("cancelled while receiving epoch %d start: %s", params.targetEpoch, params.ctx.Err()) + } + + return next(params) +} + +func (m *Manager) epochBlockCommand(params *syncingFlowParams, next dataflow.Next[*syncingFlowParams]) (err error) { + for { + if params.epochBlocksLeft == 0 { + m.log.Debugf("all blocks for epoch %d received", params.targetEpoch) + break + } + select { + case epochBlock, ok := <-params.epochChannels.blockChan: + if !ok { + return nil + } + if valid, err := isOnTargetChain(epochBlock.ei, epochBlock.ec, params); !valid { + return errors.Wrap(err, "received invalid block") + } + + block := epochBlock.block + if 
_, exists := params.epochBlocks[block.ID()]; exists { + return errors.Errorf("received duplicate block %s for epoch %d", block.ID(), params.targetEpoch) + } + + m.log.Debugw("read block", "peer", params.peerID, "EI", epochBlock.ei, "blockID", block.ID()) + + params.tangleTree.Update(block.IDBytes(), block.IDBytes()) + params.epochBlocks[block.ID()] = block + params.epochBlocksLeft-- + + m.log.Debugf("epoch %d: %d blocks left", params.targetEpoch, params.epochBlocksLeft) + case <-params.ctx.Done(): + return errors.Errorf("cancelled while receiving blocks for epoch %d: %s", params.targetEpoch, params.ctx.Err()) + } + } + + return next(params) +} + +func (m *Manager) epochEndCommand(params *syncingFlowParams, next dataflow.Next[*syncingFlowParams]) (err error) { + select { + case epochEnd, ok := <-params.epochChannels.endChan: + if !ok { + return nil + } + if valid, err := isOnTargetChain(epochEnd.ei, epochEnd.ec, params); !valid { + return errors.Wrap(err, "received invalid epoch end") + } + + params.stateMutationRoot = epochEnd.stateMutationRoot + params.stateRoot = epochEnd.stateRoot + params.manaRoot = epochEnd.manaRoot + + m.log.Debugw("read epoch end", "EI", params.targetEpoch) + case <-params.ctx.Done(): + return errors.Errorf("cancelled while ending epoch %d: %s", params.targetEpoch, params.ctx.Err()) + } + + return next(params) +} + +func (m *Manager) epochVerifyCommand(params *syncingFlowParams, next dataflow.Next[*syncingFlowParams]) (err error) { + syncedECRecord := epoch.NewECRecord(params.targetEpoch) + syncedECRecord.SetECR(epoch.ComputeECR( + epoch.NewMerkleRoot(params.tangleTree.Root()), + params.stateMutationRoot, + params.stateRoot, + params.manaRoot, + )) + syncedECRecord.SetPrevEC(params.targetPrevEC) + + if syncedECRecord.ComputeEC() != params.targetEC { + return errors.Errorf("epoch %d EC record is not correct", params.targetEpoch) + } + + return next(params) +} + +func (m *Manager) epochProcessBlocksCommand(params *syncingFlowParams, next 
dataflow.Next[*syncingFlowParams]) (err error) { + for _, blk := range params.epochBlocks { + m.blockProcessorFunc(blk, m.p2pManager.GetNeighborsByID([]identity.ID{params.peerID})[0].Peer) + } + + return next(params) +} + +func isOnTargetChain(ei epoch.Index, ec epoch.EC, params *syncingFlowParams) (valid bool, err error) { + if ei != params.targetEpoch { + return false, errors.Errorf("received epoch %d while we expected epoch %d", ei, params.targetEpoch) + } + if ec != params.targetEC { + return false, errors.Errorf("received on wrong EC chain for epoch %d", params.targetEpoch) + } + + return true, nil +} diff --git a/packages/node/warpsync/validation.go b/packages/node/warpsync/validation.go new file mode 100644 index 0000000000..2a1a0195a4 --- /dev/null +++ b/packages/node/warpsync/validation.go @@ -0,0 +1,199 @@ +package warpsync + +import ( + "context" + + "github.com/cockroachdb/errors" + "github.com/iotaledger/goshimmer/packages/core/epoch" + "github.com/iotaledger/goshimmer/packages/node/p2p" + wp "github.com/iotaledger/goshimmer/packages/node/warpsync/warpsyncproto" + "github.com/iotaledger/goshimmer/plugins/epochstorage" + "github.com/iotaledger/hive.go/core/generics/set" + "github.com/iotaledger/hive.go/core/identity" +) + +type neighborCommitment struct { + neighbor *p2p.Neighbor + ecRecord *epoch.ECRecord +} + +func (m *Manager) validateBackwards(ctx context.Context, start, end epoch.Index, startEC, endPrevEC epoch.EC) (ecChain map[epoch.Index]epoch.EC, validPeers *set.AdvancedSet[identity.ID], err error) { + m.startValidation() + defer m.endValidation() + + ecChain = make(map[epoch.Index]epoch.EC) + ecRecordChain := make(map[epoch.Index]*epoch.ECRecord) + validPeers = set.NewAdvancedSet(m.p2pManager.AllNeighborsIDs()...) 
+ activePeers := set.NewAdvancedSet[identity.ID]() + neighborCommitments := make(map[epoch.Index]map[identity.ID]*neighborCommitment) + + // We do not request the start nor the ending epoch, as we know the beginning (snapshot) and the end (tip received via gossip) of the chain. + startRange := start + 1 + endRange := end - 1 + for ei := endRange; ei >= startRange; ei-- { + m.requestEpochCommittment(ei) + } + + epochToValidate := endRange + ecChain[start] = startEC + ecRecordChain[end] = epoch.NewECRecord(end) + ecRecordChain[end].SetPrevEC(endPrevEC) + + for { + select { + case commitment, ok := <-m.commitmentsChan: + if !ok { + return nil, nil, nil + } + ecRecord := commitment.ecRecord + peerID := commitment.neighbor.Peer.ID() + commitmentEI := ecRecord.EI() + m.log.Debugw("read committment", "EI", commitmentEI, "EC", ecRecord.ComputeEC().Base58()) + + activePeers.Add(peerID) + // Ignore invalid neighbor. + if !validPeers.Has(peerID) { + m.log.Debugw("ignoring invalid neighbor", "ID", peerID, "validPeers", validPeers) + continue + } + + // Ignore committments outside of the range. + if commitmentEI < startRange || commitmentEI > endRange { + m.log.Debugw("ignoring committment outside of requested range", "EI", commitmentEI, "peer", peerID) + continue + + } + + // If we already validated this epoch, we check if the neighbor is on the target chain. + if commitmentEI > epochToValidate { + if ecRecordChain[commitmentEI].ComputeEC() != ecRecord.ComputeEC() { + m.log.Infow("ignoring commitment outside of the target chain", "peer", peerID) + validPeers.Delete(peerID) + } + continue + } + + // commitmentEI <= epochToValidate + if neighborCommitments[commitmentEI] == nil { + neighborCommitments[commitmentEI] = make(map[identity.ID]*neighborCommitment) + } + neighborCommitments[commitmentEI][peerID] = commitment + + // We received a committment out of order, we can evaluate it only later. 
+ if commitmentEI < epochToValidate { + continue + } + + // commitmentEI == epochToValidate + // Validate commitments collected so far. + for { + neighborCommitmentsForEpoch, received := neighborCommitments[epochToValidate] + // We haven't received commitments for this epoch yet. + if !received { + break + } + + for peerID, epochCommitment := range neighborCommitmentsForEpoch { + if !validPeers.Has(peerID) { + continue + } + proposedECRecord := epochCommitment.ecRecord + if ecRecordChain[epochToValidate+1].PrevEC() != proposedECRecord.ComputeEC() { + m.log.Infow("ignoring commitment outside of the target chain", "peer", peerID) + validPeers.Delete(peerID) + continue + } + + // If we already stored the target epoch for the chain, we just keep validating neighbors. + if _, exists := ecRecordChain[epochToValidate]; exists { + continue + } + + // We store the valid committment for this chain. + ecRecordChain[epochToValidate] = proposedECRecord + ecChain[epochToValidate] = proposedECRecord.ComputeEC() + } + + // Stop if we were not able to validate epochToValidate. + if _, exists := ecRecordChain[epochToValidate]; !exists { + break + } + + // We validated the epoch and identified the neighbors that are on the target chain. 
+ epochToValidate-- + m.log.Debugf("epochs left %d", epochToValidate-start) + } + + if epochToValidate == start { + syncedStartPrevEC := ecRecordChain[start+1].PrevEC() + if startEC != syncedStartPrevEC { + return nil, nil, errors.Errorf("obtained chain does not match expected starting point EC: expected %s, actual %s", startEC.Base58(), syncedStartPrevEC.Base58()) + } + m.log.Infof("range %d-%d validated", start, end) + validPeers = validPeers.Intersect(activePeers) + return ecChain, validPeers, nil + } + + case <-ctx.Done(): + return nil, nil, errors.Errorf("cancelled while validating epoch range %d to %d: %s", start, end, ctx.Err()) + } + } +} + +func (m *Manager) startValidation() { + m.validationLock.Lock() + defer m.validationLock.Unlock() + m.validationInProgress = true + m.commitmentsChan = make(chan *neighborCommitment) + m.commitmentsStopChan = make(chan struct{}) +} + +func (m *Manager) endValidation() { + close(m.commitmentsStopChan) + m.validationLock.Lock() + defer m.validationLock.Unlock() + m.validationInProgress = false + close(m.commitmentsChan) +} + +func (m *Manager) processEpochCommittmentRequestPacket(packetEpochRequest *wp.Packet_EpochCommitmentRequest, nbr *p2p.Neighbor) { + ei := epoch.Index(packetEpochRequest.EpochCommitmentRequest.GetEI()) + m.log.Debugw("received epoch committment request", "peer", nbr.Peer.ID(), "EI", ei) + + ecRecord, exists := epochstorage.GetEpochCommittment(ei) + if !exists { + return + } + + m.sendEpochCommittmentMessage(ei, ecRecord.ECR(), ecRecord.PrevEC(), nbr.ID()) + + m.log.Debugw("sent epoch committment", "peer", nbr.Peer.ID(), "EI", ei, "EC", ecRecord.ComputeEC().Base58()) +} + +func (m *Manager) processEpochCommittmentPacket(packetEpochCommittment *wp.Packet_EpochCommitment, nbr *p2p.Neighbor) { + m.validationLock.RLock() + defer m.validationLock.RUnlock() + + if !m.validationInProgress { + return + } + + ei := epoch.Index(packetEpochCommittment.EpochCommitment.GetEI()) + ecr := 
epoch.NewMerkleRoot(packetEpochCommittment.EpochCommitment.GetECR()) + prevEC := epoch.NewMerkleRoot(packetEpochCommittment.EpochCommitment.GetPrevEC()) + + ecRecord := epoch.NewECRecord(ei) + ecRecord.SetECR(ecr) + ecRecord.SetPrevEC(prevEC) + + m.log.Debugw("received epoch committment", "peer", nbr.Peer.ID(), "EI", ei, "EC", ecRecord.ComputeEC().Base58()) + + select { + case <-m.commitmentsStopChan: + return + case m.commitmentsChan <- &neighborCommitment{ + neighbor: nbr, + ecRecord: ecRecord, + }: + } +} diff --git a/packages/node/warpsync/warpsyncproto/message.pb.go b/packages/node/warpsync/warpsyncproto/message.pb.go new file mode 100644 index 0000000000..2cba20abba --- /dev/null +++ b/packages/node/warpsync/warpsyncproto/message.pb.go @@ -0,0 +1,831 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.12.4 +// source: packages/node/warpsync/warpsyncproto/message.proto + +package warpsyncproto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Packet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Body: + // *Packet_EpochBlocksStart + // *Packet_EpochBlocksBatch + // *Packet_EpochBlocksEnd + // *Packet_EpochBlocksRequest + // *Packet_EpochCommitment + // *Packet_EpochCommitmentRequest + // *Packet_Negotiation + Body isPacket_Body `protobuf_oneof:"body"` +} + +func (x *Packet) Reset() { + *x = Packet{} + if protoimpl.UnsafeEnabled { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Packet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Packet) ProtoMessage() {} + +func (x *Packet) ProtoReflect() protoreflect.Message { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Packet.ProtoReflect.Descriptor instead. 
+func (*Packet) Descriptor() ([]byte, []int) { + return file_packages_node_warpsync_warpsyncproto_message_proto_rawDescGZIP(), []int{0} +} + +func (m *Packet) GetBody() isPacket_Body { + if m != nil { + return m.Body + } + return nil +} + +func (x *Packet) GetEpochBlocksStart() *EpochBlocksStart { + if x, ok := x.GetBody().(*Packet_EpochBlocksStart); ok { + return x.EpochBlocksStart + } + return nil +} + +func (x *Packet) GetEpochBlocksBatch() *EpochBlocksBatch { + if x, ok := x.GetBody().(*Packet_EpochBlocksBatch); ok { + return x.EpochBlocksBatch + } + return nil +} + +func (x *Packet) GetEpochBlocksEnd() *EpochBlocksEnd { + if x, ok := x.GetBody().(*Packet_EpochBlocksEnd); ok { + return x.EpochBlocksEnd + } + return nil +} + +func (x *Packet) GetEpochBlocksRequest() *EpochBlocksRequest { + if x, ok := x.GetBody().(*Packet_EpochBlocksRequest); ok { + return x.EpochBlocksRequest + } + return nil +} + +func (x *Packet) GetEpochCommitment() *EpochCommittment { + if x, ok := x.GetBody().(*Packet_EpochCommitment); ok { + return x.EpochCommitment + } + return nil +} + +func (x *Packet) GetEpochCommitmentRequest() *EpochCommittmentRequest { + if x, ok := x.GetBody().(*Packet_EpochCommitmentRequest); ok { + return x.EpochCommitmentRequest + } + return nil +} + +func (x *Packet) GetNegotiation() *Negotiation { + if x, ok := x.GetBody().(*Packet_Negotiation); ok { + return x.Negotiation + } + return nil +} + +type isPacket_Body interface { + isPacket_Body() +} + +type Packet_EpochBlocksStart struct { + EpochBlocksStart *EpochBlocksStart `protobuf:"bytes,1,opt,name=epochBlocksStart,proto3,oneof"` +} + +type Packet_EpochBlocksBatch struct { + EpochBlocksBatch *EpochBlocksBatch `protobuf:"bytes,2,opt,name=epochBlocksBatch,proto3,oneof"` +} + +type Packet_EpochBlocksEnd struct { + EpochBlocksEnd *EpochBlocksEnd `protobuf:"bytes,3,opt,name=epochBlocksEnd,proto3,oneof"` +} + +type Packet_EpochBlocksRequest struct { + EpochBlocksRequest *EpochBlocksRequest 
`protobuf:"bytes,4,opt,name=epochBlocksRequest,proto3,oneof"` +} + +type Packet_EpochCommitment struct { + EpochCommitment *EpochCommittment `protobuf:"bytes,5,opt,name=epochCommitment,proto3,oneof"` +} + +type Packet_EpochCommitmentRequest struct { + EpochCommitmentRequest *EpochCommittmentRequest `protobuf:"bytes,6,opt,name=epochCommitmentRequest,proto3,oneof"` +} + +type Packet_Negotiation struct { + Negotiation *Negotiation `protobuf:"bytes,7,opt,name=negotiation,proto3,oneof"` +} + +func (*Packet_EpochBlocksStart) isPacket_Body() {} + +func (*Packet_EpochBlocksBatch) isPacket_Body() {} + +func (*Packet_EpochBlocksEnd) isPacket_Body() {} + +func (*Packet_EpochBlocksRequest) isPacket_Body() {} + +func (*Packet_EpochCommitment) isPacket_Body() {} + +func (*Packet_EpochCommitmentRequest) isPacket_Body() {} + +func (*Packet_Negotiation) isPacket_Body() {} + +type EpochBlocksStart struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EI int64 `protobuf:"varint,1,opt,name=EI,proto3" json:"EI,omitempty"` + EC []byte `protobuf:"bytes,2,opt,name=EC,proto3" json:"EC,omitempty"` + BlocksCount int64 `protobuf:"varint,3,opt,name=blocksCount,proto3" json:"blocksCount,omitempty"` +} + +func (x *EpochBlocksStart) Reset() { + *x = EpochBlocksStart{} + if protoimpl.UnsafeEnabled { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EpochBlocksStart) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EpochBlocksStart) ProtoMessage() {} + +func (x *EpochBlocksStart) ProtoReflect() protoreflect.Message { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use EpochBlocksStart.ProtoReflect.Descriptor instead. +func (*EpochBlocksStart) Descriptor() ([]byte, []int) { + return file_packages_node_warpsync_warpsyncproto_message_proto_rawDescGZIP(), []int{1} +} + +func (x *EpochBlocksStart) GetEI() int64 { + if x != nil { + return x.EI + } + return 0 +} + +func (x *EpochBlocksStart) GetEC() []byte { + if x != nil { + return x.EC + } + return nil +} + +func (x *EpochBlocksStart) GetBlocksCount() int64 { + if x != nil { + return x.BlocksCount + } + return 0 +} + +type EpochBlocksBatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EI int64 `protobuf:"varint,1,opt,name=EI,proto3" json:"EI,omitempty"` + EC []byte `protobuf:"bytes,2,opt,name=EC,proto3" json:"EC,omitempty"` + Blocks [][]byte `protobuf:"bytes,3,rep,name=blocks,proto3" json:"blocks,omitempty"` +} + +func (x *EpochBlocksBatch) Reset() { + *x = EpochBlocksBatch{} + if protoimpl.UnsafeEnabled { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EpochBlocksBatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EpochBlocksBatch) ProtoMessage() {} + +func (x *EpochBlocksBatch) ProtoReflect() protoreflect.Message { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EpochBlocksBatch.ProtoReflect.Descriptor instead. 
+func (*EpochBlocksBatch) Descriptor() ([]byte, []int) { + return file_packages_node_warpsync_warpsyncproto_message_proto_rawDescGZIP(), []int{2} +} + +func (x *EpochBlocksBatch) GetEI() int64 { + if x != nil { + return x.EI + } + return 0 +} + +func (x *EpochBlocksBatch) GetEC() []byte { + if x != nil { + return x.EC + } + return nil +} + +func (x *EpochBlocksBatch) GetBlocks() [][]byte { + if x != nil { + return x.Blocks + } + return nil +} + +type EpochBlocksEnd struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EI int64 `protobuf:"varint,1,opt,name=EI,proto3" json:"EI,omitempty"` + EC []byte `protobuf:"bytes,2,opt,name=EC,proto3" json:"EC,omitempty"` + StateMutationRoot []byte `protobuf:"bytes,3,opt,name=stateMutationRoot,proto3" json:"stateMutationRoot,omitempty"` + StateRoot []byte `protobuf:"bytes,4,opt,name=stateRoot,proto3" json:"stateRoot,omitempty"` + ManaRoot []byte `protobuf:"bytes,5,opt,name=manaRoot,proto3" json:"manaRoot,omitempty"` +} + +func (x *EpochBlocksEnd) Reset() { + *x = EpochBlocksEnd{} + if protoimpl.UnsafeEnabled { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EpochBlocksEnd) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EpochBlocksEnd) ProtoMessage() {} + +func (x *EpochBlocksEnd) ProtoReflect() protoreflect.Message { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EpochBlocksEnd.ProtoReflect.Descriptor instead. 
+func (*EpochBlocksEnd) Descriptor() ([]byte, []int) { + return file_packages_node_warpsync_warpsyncproto_message_proto_rawDescGZIP(), []int{3} +} + +func (x *EpochBlocksEnd) GetEI() int64 { + if x != nil { + return x.EI + } + return 0 +} + +func (x *EpochBlocksEnd) GetEC() []byte { + if x != nil { + return x.EC + } + return nil +} + +func (x *EpochBlocksEnd) GetStateMutationRoot() []byte { + if x != nil { + return x.StateMutationRoot + } + return nil +} + +func (x *EpochBlocksEnd) GetStateRoot() []byte { + if x != nil { + return x.StateRoot + } + return nil +} + +func (x *EpochBlocksEnd) GetManaRoot() []byte { + if x != nil { + return x.ManaRoot + } + return nil +} + +type EpochBlocksRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EI int64 `protobuf:"varint,1,opt,name=EI,proto3" json:"EI,omitempty"` + EC []byte `protobuf:"bytes,2,opt,name=EC,proto3" json:"EC,omitempty"` +} + +func (x *EpochBlocksRequest) Reset() { + *x = EpochBlocksRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EpochBlocksRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EpochBlocksRequest) ProtoMessage() {} + +func (x *EpochBlocksRequest) ProtoReflect() protoreflect.Message { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EpochBlocksRequest.ProtoReflect.Descriptor instead. 
+func (*EpochBlocksRequest) Descriptor() ([]byte, []int) { + return file_packages_node_warpsync_warpsyncproto_message_proto_rawDescGZIP(), []int{4} +} + +func (x *EpochBlocksRequest) GetEI() int64 { + if x != nil { + return x.EI + } + return 0 +} + +func (x *EpochBlocksRequest) GetEC() []byte { + if x != nil { + return x.EC + } + return nil +} + +type EpochCommittment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EI int64 `protobuf:"varint,1,opt,name=EI,proto3" json:"EI,omitempty"` + PrevEC []byte `protobuf:"bytes,2,opt,name=prevEC,proto3" json:"prevEC,omitempty"` + ECR []byte `protobuf:"bytes,3,opt,name=ECR,proto3" json:"ECR,omitempty"` +} + +func (x *EpochCommittment) Reset() { + *x = EpochCommittment{} + if protoimpl.UnsafeEnabled { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EpochCommittment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EpochCommittment) ProtoMessage() {} + +func (x *EpochCommittment) ProtoReflect() protoreflect.Message { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EpochCommittment.ProtoReflect.Descriptor instead. 
+func (*EpochCommittment) Descriptor() ([]byte, []int) { + return file_packages_node_warpsync_warpsyncproto_message_proto_rawDescGZIP(), []int{5} +} + +func (x *EpochCommittment) GetEI() int64 { + if x != nil { + return x.EI + } + return 0 +} + +func (x *EpochCommittment) GetPrevEC() []byte { + if x != nil { + return x.PrevEC + } + return nil +} + +func (x *EpochCommittment) GetECR() []byte { + if x != nil { + return x.ECR + } + return nil +} + +type EpochCommittmentRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EI int64 `protobuf:"varint,1,opt,name=EI,proto3" json:"EI,omitempty"` +} + +func (x *EpochCommittmentRequest) Reset() { + *x = EpochCommittmentRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EpochCommittmentRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EpochCommittmentRequest) ProtoMessage() {} + +func (x *EpochCommittmentRequest) ProtoReflect() protoreflect.Message { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EpochCommittmentRequest.ProtoReflect.Descriptor instead. 
+func (*EpochCommittmentRequest) Descriptor() ([]byte, []int) { + return file_packages_node_warpsync_warpsyncproto_message_proto_rawDescGZIP(), []int{6} +} + +func (x *EpochCommittmentRequest) GetEI() int64 { + if x != nil { + return x.EI + } + return 0 +} + +type Negotiation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Negotiation) Reset() { + *x = Negotiation{} + if protoimpl.UnsafeEnabled { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Negotiation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Negotiation) ProtoMessage() {} + +func (x *Negotiation) ProtoReflect() protoreflect.Message { + mi := &file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Negotiation.ProtoReflect.Descriptor instead. 
+func (*Negotiation) Descriptor() ([]byte, []int) { + return file_packages_node_warpsync_warpsyncproto_message_proto_rawDescGZIP(), []int{7} +} + +var File_packages_node_warpsync_warpsyncproto_message_proto protoreflect.FileDescriptor + +var file_packages_node_warpsync_warpsyncproto_message_proto_rawDesc = []byte{ + 0x0a, 0x32, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x73, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, + 0x77, 0x61, 0x72, 0x70, 0x73, 0x79, 0x6e, 0x63, 0x2f, 0x77, 0x61, 0x72, 0x70, 0x73, 0x79, 0x6e, + 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x77, 0x61, 0x72, 0x70, 0x73, 0x79, 0x6e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x04, 0x0a, 0x06, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x4d, + 0x0a, 0x10, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x77, 0x61, 0x72, 0x70, 0x73, + 0x79, 0x6e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x48, 0x00, 0x52, 0x10, 0x65, 0x70, 0x6f, + 0x63, 0x68, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x4d, 0x0a, + 0x10, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x77, 0x61, 0x72, 0x70, 0x73, 0x79, + 0x6e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x10, 0x65, 0x70, 0x6f, 0x63, + 0x68, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x47, 0x0a, 0x0e, + 0x65, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x45, 0x6e, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x77, 0x61, 0x72, 0x70, 0x73, 0x79, 0x6e, 0x63, 0x70, + 0x72, 0x6f, 
0x74, 0x6f, 0x2e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, + 0x45, 0x6e, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x73, 0x45, 0x6e, 0x64, 0x12, 0x53, 0x0a, 0x12, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x77, 0x61, 0x72, 0x70, 0x73, 0x79, 0x6e, 0x63, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x12, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x0f, 0x65, 0x70, + 0x6f, 0x63, 0x68, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x77, 0x61, 0x72, 0x70, 0x73, 0x79, 0x6e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, + 0x6d, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x43, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x60, 0x0a, 0x16, 0x65, 0x70, 0x6f, 0x63, 0x68, + 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x77, 0x61, 0x72, 0x70, 0x73, 0x79, + 0x6e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x43, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, + 0x00, 0x52, 0x16, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x6e, 0x65, 0x67, + 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x77, 0x61, 0x72, 0x70, 0x73, 0x79, 0x6e, 
0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4e, + 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x65, + 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x22, 0x54, 0x0a, 0x10, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x45, 0x49, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x02, 0x45, 0x49, 0x12, 0x0e, 0x0a, 0x02, 0x45, 0x43, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x02, 0x45, 0x43, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x4a, 0x0a, 0x10, 0x45, 0x70, 0x6f, 0x63, 0x68, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x0e, 0x0a, 0x02, 0x45, + 0x49, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x45, 0x49, 0x12, 0x0e, 0x0a, 0x02, 0x45, + 0x43, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x45, 0x43, 0x12, 0x16, 0x0a, 0x06, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x73, 0x22, 0x98, 0x01, 0x0a, 0x0e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x73, 0x45, 0x6e, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x45, 0x49, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x02, 0x45, 0x49, 0x12, 0x0e, 0x0a, 0x02, 0x45, 0x43, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x02, 0x45, 0x43, 0x12, 0x2c, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4d, + 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 
0x52, 0x6f, + 0x6f, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x61, 0x6e, 0x61, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6d, 0x61, 0x6e, 0x61, 0x52, 0x6f, 0x6f, 0x74, 0x22, 0x34, + 0x0a, 0x12, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x45, 0x49, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x02, 0x45, 0x49, 0x12, 0x0e, 0x0a, 0x02, 0x45, 0x43, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x02, 0x45, 0x43, 0x22, 0x4c, 0x0a, 0x10, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x43, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x45, 0x49, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x45, 0x49, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x76, + 0x45, 0x43, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x72, 0x65, 0x76, 0x45, 0x43, + 0x12, 0x10, 0x0a, 0x03, 0x45, 0x43, 0x52, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x45, + 0x43, 0x52, 0x22, 0x29, 0x0a, 0x17, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x43, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, + 0x02, 0x45, 0x49, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x45, 0x49, 0x22, 0x0d, 0x0a, + 0x0b, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x46, 0x5a, 0x44, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x6f, 0x74, 0x61, 0x6c, + 0x65, 0x64, 0x67, 0x65, 0x72, 0x2f, 0x67, 0x6f, 0x73, 0x68, 0x69, 0x6d, 0x6d, 0x65, 0x72, 0x2f, + 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x73, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x77, 0x61, + 0x72, 0x70, 0x73, 0x79, 0x6e, 0x63, 0x2f, 0x77, 0x61, 0x72, 0x70, 0x73, 0x79, 0x6e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_packages_node_warpsync_warpsyncproto_message_proto_rawDescOnce sync.Once + 
file_packages_node_warpsync_warpsyncproto_message_proto_rawDescData = file_packages_node_warpsync_warpsyncproto_message_proto_rawDesc +) + +func file_packages_node_warpsync_warpsyncproto_message_proto_rawDescGZIP() []byte { + file_packages_node_warpsync_warpsyncproto_message_proto_rawDescOnce.Do(func() { + file_packages_node_warpsync_warpsyncproto_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_packages_node_warpsync_warpsyncproto_message_proto_rawDescData) + }) + return file_packages_node_warpsync_warpsyncproto_message_proto_rawDescData +} + +var file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_packages_node_warpsync_warpsyncproto_message_proto_goTypes = []interface{}{ + (*Packet)(nil), // 0: warpsyncproto.Packet + (*EpochBlocksStart)(nil), // 1: warpsyncproto.EpochBlocksStart + (*EpochBlocksBatch)(nil), // 2: warpsyncproto.EpochBlocksBatch + (*EpochBlocksEnd)(nil), // 3: warpsyncproto.EpochBlocksEnd + (*EpochBlocksRequest)(nil), // 4: warpsyncproto.EpochBlocksRequest + (*EpochCommittment)(nil), // 5: warpsyncproto.EpochCommittment + (*EpochCommittmentRequest)(nil), // 6: warpsyncproto.EpochCommittmentRequest + (*Negotiation)(nil), // 7: warpsyncproto.Negotiation +} +var file_packages_node_warpsync_warpsyncproto_message_proto_depIdxs = []int32{ + 1, // 0: warpsyncproto.Packet.epochBlocksStart:type_name -> warpsyncproto.EpochBlocksStart + 2, // 1: warpsyncproto.Packet.epochBlocksBatch:type_name -> warpsyncproto.EpochBlocksBatch + 3, // 2: warpsyncproto.Packet.epochBlocksEnd:type_name -> warpsyncproto.EpochBlocksEnd + 4, // 3: warpsyncproto.Packet.epochBlocksRequest:type_name -> warpsyncproto.EpochBlocksRequest + 5, // 4: warpsyncproto.Packet.epochCommitment:type_name -> warpsyncproto.EpochCommittment + 6, // 5: warpsyncproto.Packet.epochCommitmentRequest:type_name -> warpsyncproto.EpochCommittmentRequest + 7, // 6: warpsyncproto.Packet.negotiation:type_name -> warpsyncproto.Negotiation + 7, // 
[7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_packages_node_warpsync_warpsyncproto_message_proto_init() } +func file_packages_node_warpsync_warpsyncproto_message_proto_init() { + if File_packages_node_warpsync_warpsyncproto_message_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Packet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EpochBlocksStart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EpochBlocksBatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EpochBlocksEnd); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EpochBlocksRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EpochCommittment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EpochCommittmentRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Negotiation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Packet_EpochBlocksStart)(nil), + (*Packet_EpochBlocksBatch)(nil), + (*Packet_EpochBlocksEnd)(nil), + (*Packet_EpochBlocksRequest)(nil), + (*Packet_EpochCommitment)(nil), + (*Packet_EpochCommitmentRequest)(nil), + (*Packet_Negotiation)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_packages_node_warpsync_warpsyncproto_message_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_packages_node_warpsync_warpsyncproto_message_proto_goTypes, + DependencyIndexes: file_packages_node_warpsync_warpsyncproto_message_proto_depIdxs, + MessageInfos: file_packages_node_warpsync_warpsyncproto_message_proto_msgTypes, + }.Build() + File_packages_node_warpsync_warpsyncproto_message_proto = out.File + file_packages_node_warpsync_warpsyncproto_message_proto_rawDesc = nil + file_packages_node_warpsync_warpsyncproto_message_proto_goTypes = nil + 
file_packages_node_warpsync_warpsyncproto_message_proto_depIdxs = nil +} diff --git a/packages/node/warpsync/warpsyncproto/message.proto b/packages/node/warpsync/warpsyncproto/message.proto new file mode 100644 index 0000000000..a450be30dd --- /dev/null +++ b/packages/node/warpsync/warpsyncproto/message.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +option go_package = "github.com/iotaledger/goshimmer/packages/node/warpsync/warpsyncproto"; + +package warpsyncproto; + +message Packet { + oneof body { + EpochBlocksStart epochBlocksStart = 1; + EpochBlocksBatch epochBlocksBatch = 2; + EpochBlocksEnd epochBlocksEnd = 3; + EpochBlocksRequest epochBlocksRequest = 4; + EpochCommittment epochCommitment = 5; + EpochCommittmentRequest epochCommitmentRequest = 6; + Negotiation negotiation = 7; + } +} + +message EpochBlocksStart { + int64 EI = 1; + bytes EC = 2; + int64 blocksCount = 3; +} + +message EpochBlocksBatch { + int64 EI = 1; + bytes EC = 2; + repeated bytes blocks = 3; +} + +message EpochBlocksEnd { + int64 EI = 1; + bytes EC = 2; + bytes stateMutationRoot = 3; + bytes stateRoot = 4; + bytes manaRoot = 5; +} + +message EpochBlocksRequest { + int64 EI = 1; + bytes EC = 2; +} + +message EpochCommittment { + int64 EI = 1; + bytes prevEC = 2; + bytes ECR = 3; +} + +message EpochCommittmentRequest { + int64 EI = 1; +} + +message Negotiation {} \ No newline at end of file diff --git a/plugins/autopeering/discovery/parameters.go b/plugins/autopeering/discovery/parameters.go index 2e08146383..d0cc4832c0 100644 --- a/plugins/autopeering/discovery/parameters.go +++ b/plugins/autopeering/discovery/parameters.go @@ -5,7 +5,7 @@ import "github.com/iotaledger/goshimmer/plugins/config" // ParametersDefinitionDiscovery contains the definition of configuration parameters used by the autopeering peer discovery. type ParametersDefinitionDiscovery struct { // NetworkVersion defines the config flag of the network version. 
- NetworkVersion uint32 `default:"62" usage:"autopeering network version"` + NetworkVersion uint32 `default:"63" usage:"autopeering network version"` // EntryNodes defines the config flag of the entry nodes. EntryNodes []string `default:"2PV5487xMw5rasGBXXWeqSi4hLz7r19YBt8Y1TGAsQbj@analysisentry-01.devnet.shimmer.iota.cafe:15626,5EDH4uY78EA6wrBkHHAVBWBMDt7EcksRq6pjzipoW15B@entry-0.devnet.tanglebay.com:14646,CAB87iQZR6BjBrCgEBupQJ4gpEBgvGKKv3uuGVRBKb4n@entry-1.devnet.tanglebay.com:14646" usage:"list of trusted entry nodes for auto peering"` diff --git a/plugins/autopeering/plugin.go b/plugins/autopeering/plugin.go index 7da7fd3427..1be5e5df1a 100644 --- a/plugins/autopeering/plugin.go +++ b/plugins/autopeering/plugin.go @@ -108,7 +108,8 @@ func configureGossipIntegration() { Plugin.Logger().Debugw("error dropping neighbor", "id", ev.DroppedID, "err", err) } })) - deps.Selection.Events().IncomingPeering.Attach(event.NewClosure(func(ev *selection.PeeringEvent) { + // We need to allocate synchronously the resources to accomodate incoming stream requests. 
+ deps.Selection.Events().IncomingPeering.Hook(event.NewClosure(func(ev *selection.PeeringEvent) { if !ev.Status { return // ignore rejected peering } diff --git a/plugins/banner/plugin.go b/plugins/banner/plugin.go index 803e817470..5022125058 100644 --- a/plugins/banner/plugin.go +++ b/plugins/banner/plugin.go @@ -15,7 +15,7 @@ var ( Plugin = node.NewPlugin(PluginName, nil, node.Enabled, configure, run) // AppVersion version number - AppVersion = "v0.9.4" + AppVersion = "v0.9.5" // SimplifiedAppVersion is the version number without commit hash SimplifiedAppVersion = simplifiedVersion(AppVersion) ) diff --git a/plugins/blocklayer/acceptance_gadget.go b/plugins/blocklayer/acceptance_gadget.go index eacb518233..616eeaae1c 100644 --- a/plugins/blocklayer/acceptance_gadget.go +++ b/plugins/blocklayer/acceptance_gadget.go @@ -1,6 +1,7 @@ package blocklayer import ( + "github.com/iotaledger/goshimmer/packages/core/epoch" "github.com/iotaledger/hive.go/core/generics/event" "github.com/iotaledger/hive.go/core/identity" @@ -29,6 +30,7 @@ func configureFinality() { // we need to update the WeightProvider on confirmation acceptanceGadget.Events().BlockAccepted.Attach(event.NewClosure(func(event *tangleold.BlockAcceptedEvent) { - deps.Tangle.WeightProvider.Update(event.Block.IssuingTime(), identity.NewID(event.Block.IssuerPublicKey())) + ei := epoch.IndexFromTime(event.Block.IssuingTime()) + deps.Tangle.WeightProvider.Update(ei, identity.NewID(event.Block.IssuerPublicKey())) })) } diff --git a/plugins/blocklayer/mana_plugin.go b/plugins/blocklayer/mana_plugin.go index 84743ac4df..30e3276f50 100644 --- a/plugins/blocklayer/mana_plugin.go +++ b/plugins/blocklayer/mana_plugin.go @@ -37,20 +37,13 @@ const ( var ( // ManaPlugin is the plugin instance of the mana plugin. 
- ManaPlugin = node.NewPlugin(PluginName, nil, node.Enabled, configureManaPlugin, runManaPlugin) - manaLogger *logger.Logger - baseManaVectors map[mana.Type]mana.BaseManaVector - storages map[mana.Type]*objectstorage.ObjectStorage[*mana.PersistableBaseMana] - allowedPledgeNodes map[mana.Type]AllowedPledge - // consensusBaseManaPastVectorStorage *objectstorage.ObjectStorage - // consensusBaseManaPastVectorMetadataStorage *objectstorage.ObjectStorage - // consensusEventsLogStorage *objectstorage.ObjectStorage - // consensusEventsLogsStorageSize atomic.Uint32. + ManaPlugin = node.NewPlugin(PluginName, nil, node.Enabled, configureManaPlugin, runManaPlugin) + manaLogger *logger.Logger + baseManaVectors map[mana.Type]mana.BaseManaVector + storages map[mana.Type]*objectstorage.ObjectStorage[*mana.PersistableBaseMana] + allowedPledgeNodes map[mana.Type]AllowedPledge onTransactionAcceptedClosure *event.Closure[*ledger.TransactionAcceptedEvent] onManaVectorToUpdateClosure *event.Closure[*notarization.ManaVectorUpdateEvent] - // onPledgeEventClosure *events.Closure - // onRevokeEventClosure *events.Closure - // debuggingEnabled bool. 
) func init() { @@ -68,10 +61,13 @@ func configureManaPlugin(*node.Plugin) { onTransactionAcceptedClosure = event.NewClosure(func(event *ledger.TransactionAcceptedEvent) { onTransactionAccepted(event.TransactionID) }) onManaVectorToUpdateClosure = event.NewClosure(func(event *notarization.ManaVectorUpdateEvent) { - baseManaVectors[mana.ConsensusMana].BookEpoch(event.EpochDiffCreated, event.EpochDiffSpent) + manaVectorEI := event.EI - epoch.Index(ManaParameters.EpochDelay) + if manaVectorEI < 1 { + return + } + spent, created := deps.NotarizationMgr.GetEpochDiff(manaVectorEI) + baseManaVectors[mana.ConsensusMana].BookEpoch(created, spent) }) - // onPledgeEventClosure = events.NewClosure(logPledgeEvent) - // onRevokeEventClosure = events.NewClosure(logRevokeEvent) allowedPledgeNodes = make(map[mana.Type]AllowedPledge) baseManaVectors = make(map[mana.Type]mana.BaseManaVector) @@ -87,11 +83,6 @@ func configureManaPlugin(*node.Plugin) { storages[mana.ResearchAccess] = objectstorage.NewStructStorage[mana.PersistableBaseMana](objectstorage.NewStoreWithRealm(store, db_pkg.PrefixMana, mana.PrefixAccessResearch)) storages[mana.ResearchConsensus] = objectstorage.NewStructStorage[mana.PersistableBaseMana](objectstorage.NewStoreWithRealm(store, db_pkg.PrefixMana, mana.PrefixConsensusResearch)) } - // consensusEventsLogStorage = osFactory.New(mana.PrefixEventStorage, mana.FromEventObjectStorage) - // consensusEventsLogsStorageSize.Store(getConsensusEventLogsStorageSize()) - // manaLogger.Infof("read %d mana events from storage", consensusEventsLogsStorageSize.Load()) - // consensusBaseManaPastVectorStorage = osFactory.New(mana.PrefixConsensusPastVector, mana.FromObjectStorage) - // consensusBaseManaPastVectorMetadataStorage = osFactory.New(mana.PrefixConsensusPastMetadata, mana.FromMetadataObjectStorage) err := verifyPledgeNodes() if err != nil { @@ -109,20 +100,6 @@ func configureEvents() { // mana.Events().Revoked.Attach(onRevokeEventClosure) } -// func logPledgeEvent(ev 
*mana.PledgedEvent) { -// if ev.ManaType == mana.ConsensusMana { -// consensusEventsLogStorage.Store(ev.ToPersistable()).Release() -// consensusEventsLogsStorageSize.Inc() -// } -// } -// -// func logRevokeEvent(ev *mana.RevokedEvent) { -// if ev.ManaType == mana.ConsensusMana { -// consensusEventsLogStorage.Store(ev.ToPersistable()).Release() -// consensusEventsLogsStorageSize.Inc() -// } -// } - func onTransactionAccepted(transactionID utxo.TransactionID) { deps.Tangle.Ledger.Storage.CachedTransaction(transactionID).Consume(func(transaction utxo.Transaction) { // holds all info mana pkg needs for correct mana calculations from the transaction @@ -218,28 +195,19 @@ func runManaPlugin(_ *node.Plugin) { } - epochDiffsConsumer := func(header *ledger.SnapshotHeader, epochDiffs map[epoch.Index]*ledger.EpochDiff) { + epochDiffsConsumer := func(diff *ledger.EpochDiff) { // We fix the cMana vector a few epochs in the past with respect of the latest epoch in the snapshot. - for ei := header.FullEpochIndex + 1; ei <= cManaTargetEpoch; ei++ { - diff, exists := epochDiffs[ei] - if !exists { - panic(fmt.Sprintf("diff with index %d missing from snapshot", ei)) - } - processOutputs(diff.Created(), consensusManaByNode, true /* areCreated */) - processOutputs(diff.Created(), accessManaByNode, true /* areCreated */) - processOutputs(diff.Spent(), consensusManaByNode, false /* areCreated */) - processOutputs(diff.Spent(), accessManaByNode, false /* areCreated */) - } + + processOutputs(diff.Created(), consensusManaByNode, true /* areCreated */) + processOutputs(diff.Created(), accessManaByNode, true /* areCreated */) + processOutputs(diff.Spent(), consensusManaByNode, false /* areCreated */) + processOutputs(diff.Spent(), accessManaByNode, false /* areCreated */) // Only the aMana will be loaded until the latest snapshot's epoch - for ei := cManaTargetEpoch + 1; ei <= header.DiffEpochIndex; ei++ { - diff, exists := epochDiffs[ei] - if !exists { - panic(fmt.Sprintf("diff with index 
%d missing from snapshot", ei)) - } - processOutputs(diff.Created(), accessManaByNode, true /* areCreated */) - processOutputs(diff.Spent(), accessManaByNode, false /* areCreated */) - } + + processOutputs(diff.Created(), accessManaByNode, true /* areCreated */) + processOutputs(diff.Spent(), accessManaByNode, false /* areCreated */) + } headerConsumer := func(header *ledger.SnapshotHeader) { @@ -249,25 +217,20 @@ func runManaPlugin(_ *node.Plugin) { } } - - if err := snapshot.LoadSnapshot(Parameters.Snapshot.File, headerConsumer, utxoStatesConsumer, epochDiffsConsumer); err != nil { + emptySepsConsumer := func(*snapshot.SolidEntryPoints) {} + + if err := snapshot.LoadSnapshot( + Parameters.Snapshot.File, + headerConsumer, + emptySepsConsumer, + utxoStatesConsumer, + epochDiffsConsumer, + deps.Tangle.WeightProvider.LoadActiveNodes, + ); err != nil { Plugin.Panic("could not load snapshot from file", Parameters.Snapshot.File, err) } baseManaVectors[mana.ConsensusMana].InitializeWithData(consensusManaByNode) baseManaVectors[mana.AccessMana].InitializeWithData(accessManaByNode) - - // initialize cMana WeightProvider with snapshot - // TODO: consume the activity record from the snapshot to determine which nodes were active at the time of the snapshot - t := deps.Tangle.Options.GenesisTime - genesisNodeID := identity.ID{} - for nodeID := range GetCMana() { - if nodeID == genesisNodeID { - continue - } - deps.Tangle.WeightProvider.Update(t, nodeID) - } - - manaLogger.Infof("MANA: read snapshot from %s", Parameters.Snapshot.File) } } pruneStorages() @@ -372,6 +335,15 @@ func GetCMana() map[identity.ID]float64 { return m } +// GetConfirmedEI is a wrapper for the weightProvider to get confirmed epoch index. +func GetConfirmedEI() epoch.Index { + ei, err := deps.NotarizationMgr.LatestConfirmedEpochIndex() + if err != nil { + panic(err) + } + return ei +} + // GetTotalMana returns sum of mana of all nodes in the network. 
func GetTotalMana(manaType mana.Type, optionalUpdateTime ...time.Time) (float64, time.Time, error) { if !QueryAllowed() { @@ -507,277 +479,6 @@ func verifyPledgeNodes() error { return nil } -// // GetLoggedEvents gets the events logs for the node IDs and time frame specified. If none is specified, it returns the logs for all nodes. -// func GetLoggedEvents(identityIDs []identity.ID, startTime time.Time, endTime time.Time) (map[identity.ID]*EventsLogs, error) { -// logs := make(map[identity.ID]*EventsLogs) -// lookup := make(map[identity.ID]bool) -// getAll := true -// -// if len(identityIDs) > 0 { -// getAll = false -// for _, nodeID := range identityIDs { -// lookup[nodeID] = true -// } -// } -// -// var err error -// consensusEventsLogStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool { -// cachedPe := &mana.CachedPersistableEvent{CachedObject: cachedObject} -// defer cachedPe.Release() -// pbm := cachedPe.Unwrap() -// -// if !getAll { -// if !lookup[pbm.NodeID] { -// return true -// } -// } -// -// if _, found := logs[pbm.NodeID]; !found { -// logs[pbm.NodeID] = &EventsLogs{} -// } -// -// var ev mana.Event -// ev, err = mana.FromPersistableEvent(pbm) -// if err != nil { -// return false -// } -// -// if ev.Timestamp().Before(startTime) || ev.Timestamp().After(endTime) { -// return true -// } -// switch ev.Type() { -// case mana.EventTypePledge: -// logs[pbm.NodeID].Pledge = append(logs[pbm.NodeID].Pledge, ev.(*mana.PledgedEvent)) -// case mana.EventTypeRevoke: -// logs[pbm.NodeID].Revoke = append(logs[pbm.NodeID].Revoke, ev.(*mana.RevokedEvent)) -// default: -// err = mana.ErrUnknownManaEvent -// return false -// } -// return true -// }) -// -// for ID := range logs { -// sort.Slice(logs[ID].Pledge, func(i, j int) bool { -// return logs[ID].Pledge[i].Time.Before(logs[ID].Pledge[j].Time) -// }) -// sort.Slice(logs[ID].Revoke, func(i, j int) bool { -// return logs[ID].Revoke[i].Time.Before(logs[ID].Revoke[j].Time) -// }) -// } -// -// 
return logs, err -// } -// -// // GetPastConsensusManaVectorMetadata gets the past consensus mana vector metadata. -// func GetPastConsensusManaVectorMetadata() *mana.ConsensusBasePastManaVectorMetadata { -// cachedObj := consensusBaseManaPastVectorMetadataStorage.Load([]byte(mana.ConsensusBaseManaPastVectorMetadataStorageKey)) -// cachedMetadata := &mana.CachedConsensusBasePastManaVectorMetadata{CachedObject: cachedObj} -// defer cachedMetadata.Release() -// return cachedMetadata.Unwrap() -// } -// -// // GetPastConsensusManaVector builds a consensus base mana vector in the past. -// func GetPastConsensusManaVector(t time.Time) (*mana.ConsensusBaseManaVector, []mana.Event, error) { -// baseManaVector, err := mana.NewBaseManaVector(mana.ConsensusMana) -// if err != nil { -// return nil, nil, err -// } -// cbmvPast := baseManaVector.(*mana.ConsensusBaseManaVector) -// cachedObj := consensusBaseManaPastVectorMetadataStorage.Load([]byte(mana.ConsensusBaseManaPastVectorMetadataStorageKey)) -// cachedMetadata := &mana.CachedConsensusBasePastManaVectorMetadata{CachedObject: cachedObj} -// defer cachedMetadata.Release() -// -// if cachedMetadata.Exists() { -// metadata := cachedMetadata.Unwrap() -// if t.After(metadata.Timestamp) { -// consensusBaseManaPastVectorStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool { -// cachedPbm := &mana.CachedPersistableBaseMana{CachedObject: cachedObject} -// defer cachedPbm.Release() -// p := cachedPbm.Unwrap() -// err = cbmvPast.FromPersistable(p) -// if err != nil { -// manaLogger.Errorf("error while restoring %s mana vector from storage: %w", mana.ConsensusMana.String(), err) -// baseManaVector, _ := mana.NewBaseManaVector(mana.ConsensusMana) -// cbmvPast = baseManaVector.(*mana.ConsensusBaseManaVector) -// return false -// } -// return true -// }) -// } -// } -// -// var eventLogs mana.EventSlice -// consensusEventsLogStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool { -// 
cachedPe := &mana.CachedPersistableEvent{CachedObject: cachedObject} -// defer cachedPe.Release() -// pe := cachedPe.Unwrap() -// if pe.Time.After(t) { -// return true -// } -// -// // already consumed in stored base mana vector. -// if cachedMetadata.Exists() && cbmvPast.Size() > 0 { -// metadata := cachedMetadata.Unwrap() -// if pe.Time.Before(metadata.Timestamp) { -// return true -// } -// } -// -// var ev mana.Event -// ev, err = mana.FromPersistableEvent(pe) -// if err != nil { -// return false -// } -// eventLogs = append(eventLogs, ev) -// return true -// }) -// if err != nil { -// return nil, nil, err -// } -// eventLogs.Sort() -// err = cbmvPast.BuildPastBaseVector(eventLogs, t) -// if err != nil { -// return nil, nil, err -// } -// -// err = cbmvPast.UpdateAll(t) -// if err != nil { -// return nil, nil, err -// } -// -// return cbmvPast, eventLogs, nil -// } -// -// func getConsensusEventLogsStorageSize() uint32 { -// var size uint32 -// consensusEventsLogStorage.ForEachKeyOnly(func(key []byte) bool { -// size++ -// return true -// }, objectstorage.WithIteratorSkipCache(true)) -// return size -// } -// -// func pruneConsensusEventLogsStorage() { -// if consensusEventsLogsStorageSize.Load() < maxConsensusEventsInStorage { -// return -// } -// -// cachedObj := consensusBaseManaPastVectorMetadataStorage.Load([]byte(mana.ConsensusBaseManaPastVectorMetadataStorageKey)) -// cachedMetadata := &mana.CachedConsensusBasePastManaVectorMetadata{CachedObject: cachedObj} -// defer cachedMetadata.Release() -// -// bmv, err := mana.NewBaseManaVector(mana.ConsensusMana) -// if err != nil { -// manaLogger.Errorf("error creating consensus base mana vector: %v", err) -// return -// } -// cbmvPast := bmv.(*mana.ConsensusBaseManaVector) -// if cachedMetadata.Exists() { -// consensusBaseManaPastVectorStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool { -// cachedPbm := &mana.CachedPersistableBaseMana{CachedObject: cachedObject} -// pbm := 
cachedPbm.Unwrap() -// if pbm != nil { -// err = cbmvPast.FromPersistable(pbm) -// if err != nil { -// return false -// } -// } -// return true -// }) -// if err != nil { -// manaLogger.Errorf("error reading stored consensus base mana vector: %v", err) -// return -// } -// } -// -// var eventLogs mana.EventSlice -// consensusEventsLogStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool { -// cachedPe := &mana.CachedPersistableEvent{CachedObject: cachedObject} -// defer cachedPe.Release() -// pe := cachedPe.Unwrap() -// var ev mana.Event -// ev, err = mana.FromPersistableEvent(pe) -// -// if cachedMetadata.Exists() { -// metadata := cachedMetadata.Unwrap() -// if ev.Timestamp().Before(metadata.Timestamp) { -// manaLogger.Errorf("consensus event storage contains event that is older, than the stored metadata timestamp %s: %s", metadata.Timestamp, ev.String()) -// return true -// } -// } -// -// if err != nil { -// return false -// } -// eventLogs = append(eventLogs, ev) -// return true -// }) -// if err != nil { -// manaLogger.Infof("error reading persistable events: %v", err) -// return -// } -// eventLogs.Sort() -// // we always want (maxConsensusEventsInStorage - slidingEventsInterval) number of events left -// deleteWindow := len(eventLogs) - (maxConsensusEventsInStorage - slidingEventsInterval) -// storageSizeInt := int(consensusEventsLogsStorageSize.Load()) -// if deleteWindow < 0 || deleteWindow > storageSizeInt { -// manaLogger.Errorf("invalid delete window %d for storage size %d, max storage size %d and sliding interval %d", -// deleteWindow, storageSizeInt, maxConsensusEventsInStorage, slidingEventsInterval) -// return -// } -// // Make sure to take related events. (we take deleteWindow oldest events) -// // Ensures that related events (same time) are not split between different intervals. 
-// prev := eventLogs[deleteWindow-1] -// var i int -// for i = deleteWindow; i < len(eventLogs); i++ { -// if !eventLogs[i].Timestamp().Equal(prev.Timestamp()) { -// break -// } -// prev = eventLogs[i] -// } -// toBePrunedEvents := eventLogs[:i] -// // TODO: later, when we have epochs, we have to make sure that `t` is before the epoch to be "finalized" next. -// // Otherwise, we won't be able to calculate the consensus mana for that epoch because we already pruned the events -// // leading up to it. -// t := toBePrunedEvents[len(toBePrunedEvents)-1].Timestamp() -// -// err = cbmvPast.BuildPastBaseVector(toBePrunedEvents, t) -// if err != nil { -// manaLogger.Errorf("error building past consensus base mana vector: %w", err) -// return -// } -// -// // store cbmv -// if err = consensusBaseManaPastVectorStorage.Prune(); err != nil { -// manaLogger.Errorf("error pruning consensus base mana vector storage: %w", err) -// return -// } -// for _, p := range cbmvPast.ToPersistables() { -// consensusBaseManaPastVectorStorage.Store(p).Release() -// } -// -// // store the metadata -// metadata := &mana.ConsensusBasePastManaVectorMetadata{ -// Timestamp: t, -// } -// -// if err = consensusBaseManaPastVectorMetadataStorage.Prune(); err != nil { -// manaLogger.Errorf("error pruning consensus base mana vector metadata storage: %w", err) -// return -// } -// consensusBaseManaPastVectorMetadataStorage.Store(metadata).Release() -// -// var entriesToDelete [][]byte -// for _, ev := range toBePrunedEvents { -// entriesToDelete = append(entriesToDelete, ev.ToPersistable().ObjectStorageKey()) -// } -// manaLogger.Infof("deleting %d events from consensus event storage", len(entriesToDelete)) -// consensusEventsLogStorage.DeleteEntriesFromStore(entriesToDelete) -// consensusEventsLogsStorageSize.Sub(uint32(len(entriesToDelete))) -// manaLogger.Infof("%d events remaining in consensus event storage", consensusEventsLogsStorageSize.Load()) -// } - func cleanupManaVectors() { vectorTypes := 
[]mana.Type{mana.AccessMana, mana.ConsensusMana} if ManaParameters.EnableResearchVectors { diff --git a/plugins/blocklayer/notarization_plugin.go b/plugins/blocklayer/notarization_plugin.go index ff208e957b..680cbd2d48 100644 --- a/plugins/blocklayer/notarization_plugin.go +++ b/plugins/blocklayer/notarization_plugin.go @@ -52,11 +52,17 @@ func init() { } func configureNotarizationPlugin(plugin *node.Plugin) { + if Parameters.Snapshot.File != "" { + emptySepsConsumer := func(*snapshot.SolidEntryPoints) {} + emptyActivityConsumer := func(activityLogs epoch.SnapshotEpochActivity) {} + err := snapshot.LoadSnapshot(Parameters.Snapshot.File, notarizationDeps.Manager.LoadECandEIs, + emptySepsConsumer, notarizationDeps.Manager.LoadOutputsWithMetadata, - notarizationDeps.Manager.LoadEpochDiffs) + notarizationDeps.Manager.LoadEpochDiff, + emptyActivityConsumer) if err != nil { plugin.Panic("could not load snapshot file:", err) } @@ -78,9 +84,8 @@ func newNotarizationManager(deps notarizationManagerDependencies) *notarization. return notarization.NewManager( notarization.NewEpochCommitmentFactory(deps.Storage, deps.Tangle, NotarizationParameters.SnapshotDepth), deps.Tangle, - notarization.MinCommittableEpochAge(NotarizationParameters.MinEpochCommitableAge), + notarization.MinCommittableEpochAge(NotarizationParameters.MinEpochCommittableAge), notarization.BootstrapWindow(NotarizationParameters.BootstrapWindow), - notarization.ManaDelay(ManaParameters.EpochDelay), notarization.Log(Plugin.Logger())) } diff --git a/plugins/blocklayer/parameters.go b/plugins/blocklayer/parameters.go index 2bfdd9279b..334c563d3b 100644 --- a/plugins/blocklayer/parameters.go +++ b/plugins/blocklayer/parameters.go @@ -75,8 +75,8 @@ type SchedulerParametersDefinition struct { // NotarizationParametersDefinition contains the definition of the parameters used by the notarization plugin. type NotarizationParametersDefinition struct { - // MinEpochCommitableAge defines the min age of a commitable epoch. 
- MinEpochCommitableAge time.Duration `default:"1m" usage:"min age of a commitable epoch"` + // MinEpochCommittableAge defines the min age of a committable epoch. + MinEpochCommittableAge time.Duration `default:"1m" usage:"min age of a committable epoch"` // BootstrapWindow when notarization manager is considered to be bootstrapped BootstrapWindow time.Duration `default:"2m" usage:"when notarization manager is considered to be bootstrapped"` // SnapshotDepth defines how many epoch diffs are stored in the snapshot, starting from the full ledgerstate diff --git a/plugins/blocklayer/plugin.go b/plugins/blocklayer/plugin.go index 2281e87526..4937526bb5 100644 --- a/plugins/blocklayer/plugin.go +++ b/plugins/blocklayer/plugin.go @@ -114,7 +114,8 @@ func configure(plugin *node.Plugin) { deps.Tangle.Booker.Events.BlockBooked.Attach(event.NewClosure(func(event *tangleold.BlockBookedEvent) { deps.Tangle.Storage.Block(event.BlockID).Consume(func(block *tangleold.Block) { - deps.Tangle.WeightProvider.Update(block.IssuingTime(), identity.NewID(block.IssuerPublicKey())) + ei := epoch.IndexFromTime(block.IssuingTime()) + deps.Tangle.WeightProvider.Update(ei, identity.NewID(block.IssuerPublicKey())) }) })) @@ -153,21 +154,20 @@ func configure(plugin *node.Plugin) { } } - epochDiffsConsumer := func(header *ledger.SnapshotHeader, epochDiffs map[epoch.Index]*ledger.EpochDiff) { - err := deps.Tangle.Ledger.LoadEpochDiffs(header, epochDiffs) + epochDiffsConsumer := func(epochDiff *ledger.EpochDiff) { + err := deps.Tangle.Ledger.LoadEpochDiff(epochDiff) if err != nil { panic(err) } - for _, epochDiff := range epochDiffs { - for _, outputWithMetadata := range epochDiff.Created() { - deps.Indexer.IndexOutput(outputWithMetadata.Output().(devnetvm.Output)) - } + for _, outputWithMetadata := range epochDiff.Created() { + deps.Indexer.IndexOutput(outputWithMetadata.Output().(devnetvm.Output)) } } - headerConsumer := func(*ledger.SnapshotHeader) {} - - err := 
snapshot.LoadSnapshot(Parameters.Snapshot.File, headerConsumer, utxoStatesConsumer, epochDiffsConsumer) + emptyHeaderConsumer := func(*ledger.SnapshotHeader) {} + emptySepsConsumer := func(*snapshot.SolidEntryPoints) {} + emptyActivityConsumer := func(activity epoch.SnapshotEpochActivity) {} + err := snapshot.LoadSnapshot(Parameters.Snapshot.File, emptyHeaderConsumer, emptySepsConsumer, utxoStatesConsumer, epochDiffsConsumer, emptyActivityConsumer) if err != nil { plugin.Panic("could not load snapshot file:", err) } @@ -234,7 +234,7 @@ func newTangle(tangleDeps tangledeps) *tangleold.Tangle { ) tangleInstance.Scheduler = tangleold.NewScheduler(tangleInstance) - tangleInstance.WeightProvider = tangleold.NewCManaWeightProvider(GetCMana, tangleInstance.TimeManager.ActivityTime, tangleDeps.Storage) + tangleInstance.WeightProvider = tangleold.NewCManaWeightProvider(GetCMana, tangleInstance.TimeManager.ActivityTime, GetConfirmedEI, tangleDeps.Storage) tangleInstance.OTVConsensusManager = tangleold.NewOTVConsensusManager(otv.NewOnTangleVoting(tangleInstance.Ledger.ConflictDAG, tangleInstance.ApprovalWeightManager.WeightOfConflict)) acceptanceGadget = acceptance.NewSimpleFinalityGadget(tangleInstance) diff --git a/plugins/blocklayer/snapshot_plugin.go b/plugins/blocklayer/snapshot_plugin.go new file mode 100644 index 0000000000..7c34004177 --- /dev/null +++ b/plugins/blocklayer/snapshot_plugin.go @@ -0,0 +1,103 @@ +package blocklayer + +import ( + "context" + "github.com/iotaledger/goshimmer/packages/core/epoch" + + "github.com/iotaledger/hive.go/core/daemon" + "github.com/iotaledger/hive.go/core/generics/event" + "github.com/iotaledger/hive.go/core/kvstore" + "github.com/iotaledger/hive.go/core/node" + "go.uber.org/dig" + + "github.com/iotaledger/goshimmer/packages/core/ledger" + "github.com/iotaledger/goshimmer/packages/core/notarization" + "github.com/iotaledger/goshimmer/packages/core/snapshot" + "github.com/iotaledger/goshimmer/packages/node/shutdown" + + 
"github.com/iotaledger/goshimmer/packages/core/tangleold" +) + +const ( + // SnapshotPluginName is the name of the snapshot plugin. + SnapshotPluginName = "Snapshot" +) + +type snapshotPluginDependencies struct { + dig.In + + Tangle *tangleold.Tangle + Manager *snapshot.Manager + NotarizationMgr *notarization.Manager +} + +type snapshotDependencies struct { + dig.In + + NotarizationMgr *notarization.Manager + Storage kvstore.KVStore +} + +var ( + SnapshotPlugin *node.Plugin + snapshotDeps = new(snapshotPluginDependencies) +) + +func init() { + SnapshotPlugin = node.NewPlugin(SnapshotPluginName, snapshotDeps, node.Enabled, configureSnapshotPlugin, runSnapshotPlugin) + + SnapshotPlugin.Events.Init.Hook(event.NewClosure(func(event *node.InitEvent) { + if err := event.Container.Provide(newSnapshotManager); err != nil { + SnapshotPlugin.Panic(err) + } + })) +} + +func configureSnapshotPlugin(plugin *node.Plugin) { + if Parameters.Snapshot.File != "" { + emptyHeaderConsumer := func(*ledger.SnapshotHeader) {} + emptyOutputsConsumer := func([]*ledger.OutputWithMetadata) {} + emptyEpochDiffsConsumer := func(*ledger.EpochDiff) {} + emptyActivityLogConsumer := func(activity epoch.SnapshotEpochActivity) {} + + err := snapshot.LoadSnapshot(Parameters.Snapshot.File, + emptyHeaderConsumer, + snapshotDeps.Manager.LoadSolidEntryPoints, + emptyOutputsConsumer, + emptyEpochDiffsConsumer, + emptyActivityLogConsumer) + if err != nil { + plugin.Panic("could not load snapshot file:", err) + } + } + + snapshotDeps.Tangle.ConfirmationOracle.Events().BlockAccepted.Attach(event.NewClosure(func(e *tangleold.BlockAcceptedEvent) { + e.Block.ForEachParentByType(tangleold.StrongParentType, func(parent tangleold.BlockID) bool { + index := parent.EpochIndex + if index < e.Block.ID().EpochIndex { + snapshotDeps.Manager.InsertSolidEntryPoint(parent) + } + return true + }) + })) + + snapshotDeps.Tangle.ConfirmationOracle.Events().BlockOrphaned.Attach(event.NewClosure(func(event 
*tangleold.BlockAcceptedEvent) { + snapshotDeps.Manager.RemoveSolidEntryPoint(event.Block) + })) + + snapshotDeps.NotarizationMgr.Events.EpochCommittable.Attach(event.NewClosure(func(e *notarization.EpochCommittableEvent) { + snapshotDeps.Manager.AdvanceSolidEntryPoints(e.EI) + })) +} + +func runSnapshotPlugin(*node.Plugin) { + if err := daemon.BackgroundWorker("Snapshot", func(ctx context.Context) { + <-ctx.Done() + }, shutdown.PriorityNotarization); err != nil { + SnapshotPlugin.Panicf("Failed to start as daemon: %s", err) + } +} + +func newSnapshotManager(deps snapshotDependencies) *snapshot.Manager { + return snapshot.NewManager(deps.NotarizationMgr, NotarizationParameters.SnapshotDepth) +} diff --git a/plugins/core.go b/plugins/core.go index 3625af30f8..b33c99089d 100644 --- a/plugins/core.go +++ b/plugins/core.go @@ -5,7 +5,7 @@ import ( "github.com/iotaledger/goshimmer/plugins/autopeering" "github.com/iotaledger/goshimmer/plugins/banner" - blocklayer "github.com/iotaledger/goshimmer/plugins/blocklayer" + "github.com/iotaledger/goshimmer/plugins/blocklayer" "github.com/iotaledger/goshimmer/plugins/bootstrapmanager" "github.com/iotaledger/goshimmer/plugins/cli" "github.com/iotaledger/goshimmer/plugins/clock" @@ -26,6 +26,7 @@ import ( "github.com/iotaledger/goshimmer/plugins/pow" "github.com/iotaledger/goshimmer/plugins/profiling" "github.com/iotaledger/goshimmer/plugins/spammer" + "github.com/iotaledger/goshimmer/plugins/warpsync" ) // Core contains the core plugins of a GoShimmer node. 
@@ -46,9 +47,11 @@ var Core = node.Plugins( blocklayer.Plugin, p2p.Plugin, gossip.Plugin, + warpsync.Plugin, firewall.Plugin, blocklayer.ManaPlugin, blocklayer.NotarizationPlugin, + blocklayer.SnapshotPlugin, bootstrapmanager.Plugin, faucet.Plugin, metrics.Plugin, diff --git a/plugins/dashboard/explorer_routes.go b/plugins/dashboard/explorer_routes.go index 1e6b1faeb4..26005f1f7b 100644 --- a/plugins/dashboard/explorer_routes.go +++ b/plugins/dashboard/explorer_routes.go @@ -16,7 +16,6 @@ import ( "github.com/iotaledger/goshimmer/packages/core/ledger/utxo" "github.com/iotaledger/goshimmer/packages/core/ledger/vm/devnetvm" "github.com/iotaledger/goshimmer/packages/core/ledger/vm/devnetvm/indexer" - "github.com/iotaledger/goshimmer/packages/core/notarization" "github.com/iotaledger/goshimmer/packages/core/tangleold" "github.com/iotaledger/goshimmer/plugins/chat" ledgerstateAPI "github.com/iotaledger/goshimmer/plugins/webapi/ledgerstate" @@ -84,7 +83,7 @@ func createExplorerBlock(blk *tangleold.Block) *ExplorerBlock { conflictIDs, _ := deps.Tangle.Booker.BlockConflictIDs(blockID) - ecRecord := epoch.NewECRecord(blk.EI()) + ecRecord := epoch.NewECRecord(blk.ECRecordEI()) ecRecord.SetECR(blk.ECR()) ecRecord.SetPrevEC(blk.PrevEC()) @@ -112,8 +111,8 @@ func createExplorerBlock(blk *tangleold.Block) *ExplorerBlock { ConfirmationStateTime: blockMetadata.ConfirmationStateTime().Unix(), PayloadType: uint32(blk.Payload().Type()), Payload: ProcessPayload(blk.Payload()), - EC: notarization.EC(ecRecord).Base58(), - EI: uint64(blk.EI()), + EC: ecRecord.ComputeEC().Base58(), + EI: uint64(blk.ECRecordEI()), ECR: blk.ECR().Base58(), PrevEC: blk.PrevEC().Base58(), LatestConfirmedEpoch: uint64(blk.LatestConfirmedEpoch()), diff --git a/plugins/database/versioning.go b/plugins/database/versioning.go index dc3ef96348..a753565aa7 100644 --- a/plugins/database/versioning.go +++ b/plugins/database/versioning.go @@ -11,7 +11,7 @@ import ( const ( // DBVersion defines the version of the 
database schema this version of GoShimmer supports. // Every time there's a breaking change regarding the stored data, this version flag should be adjusted. - DBVersion = 62 + DBVersion = 63 ) var ( diff --git a/plugins/epochstorage/plugin.go b/plugins/epochstorage/plugin.go index 774ec62343..04ea4e34f4 100644 --- a/plugins/epochstorage/plugin.go +++ b/plugins/epochstorage/plugin.go @@ -11,6 +11,7 @@ import ( "github.com/iotaledger/hive.go/core/daemon" "github.com/iotaledger/hive.go/core/generics/event" + "github.com/iotaledger/hive.go/core/generics/shrinkingmap" "github.com/iotaledger/hive.go/core/identity" "github.com/iotaledger/hive.go/core/kvstore" "github.com/iotaledger/hive.go/core/node" @@ -37,16 +38,16 @@ var ( baseStore kvstore.KVStore committableEpochsMutex sync.RWMutex - committableEpochs = make([]*epoch.ECRecord, 0) + committableEpochs = shrinkingmap.New[epoch.Index, *epoch.ECRecord]() epochVotersWeightMutex sync.RWMutex - epochVotersWeight = make(map[epoch.Index]map[epoch.ECR]map[identity.ID]float64, 0) - epochVotersLatestVote = make(map[identity.ID]*latestVote, 0) + epochVotersWeight = shrinkingmap.New[epoch.Index, map[epoch.ECR]map[identity.ID]float64]() + epochVotersLatestVote = shrinkingmap.New[identity.ID, *latestVote]() maxEpochContentsToKeep = 100 numEpochContentsToRemove = 20 epochOrderMutex sync.RWMutex - epochOrderMap = make(map[epoch.Index]types.Empty, 0) + epochOrderMap = shrinkingmap.New[epoch.Index, types.Empty]() epochOrder = make([]epoch.Index, 0) ) @@ -55,6 +56,7 @@ type latestVote struct { ecr epoch.ECR issuedTime time.Time } + type dependencies struct { dig.In @@ -80,26 +82,26 @@ func configure(plugin *node.Plugin) { deps.NotarizationMgr.Events.TangleTreeInserted.Attach(event.NewClosure(func(event *notarization.TangleTreeUpdatedEvent) { epochOrderMutex.Lock() - if _, ok := epochOrderMap[event.EI]; !ok { - epochOrderMap[event.EI] = types.Void + if _, ok := epochOrderMap.Get(event.EI); !ok { + epochOrderMap.Set(event.EI, types.Void) 
epochOrder = append(epochOrder, event.EI) checkEpochContentLimit() } epochOrderMutex.Unlock() - err := insertblockToEpoch(event.EI, event.BlockID) + err := insertBlockIntoEpoch(event.EI, event.BlockID) if err != nil { plugin.LogDebug(err) } })) deps.NotarizationMgr.Events.TangleTreeRemoved.Attach(event.NewClosure(func(event *notarization.TangleTreeUpdatedEvent) { - err := removeblockFromEpoch(event.EI, event.BlockID) + err := removeBlockFromEpoch(event.EI, event.BlockID) if err != nil { plugin.LogDebug(err) } })) deps.NotarizationMgr.Events.StateMutationTreeInserted.Attach(event.NewClosure(func(event *notarization.StateMutationTreeUpdatedEvent) { - err := insertTransactionToEpoch(event.EI, event.TransactionID) + err := insertTransactionIntoEpoch(event.EI, event.TransactionID) if err != nil { plugin.LogDebug(err) } @@ -111,7 +113,7 @@ func configure(plugin *node.Plugin) { } })) deps.NotarizationMgr.Events.UTXOTreeInserted.Attach(event.NewClosure(func(event *notarization.UTXOUpdatedEvent) { - err := insertOutputsToEpoch(event.EI, event.Spent, event.Created) + err := insertOutputsIntoEpoch(event.EI, event.Spent, event.Created) if err != nil { plugin.LogDebug(err) } @@ -125,7 +127,7 @@ func configure(plugin *node.Plugin) { deps.NotarizationMgr.Events.EpochCommittable.Attach(event.NewClosure(func(event *notarization.EpochCommittableEvent) { committableEpochsMutex.Lock() defer committableEpochsMutex.Unlock() - committableEpochs = append(committableEpochs, event.ECRecord) + committableEpochs.Set(event.EI, event.ECRecord) })) deps.Tangle.ConfirmationOracle.Events().BlockAccepted.Attach(event.NewClosure(func(event *tangleold.BlockAcceptedEvent) { @@ -147,6 +149,14 @@ func checkEpochContentLimit() { return } + epochVotersWeightMutex.Lock() + committableEpochsMutex.Lock() + + defer func() { + epochVotersWeightMutex.Unlock() + committableEpochsMutex.Unlock() + }() + // sort the order list to remove the oldest ones. 
sort.Slice(epochOrder, func(i, j int) bool { return epochOrder[i] < epochOrder[j] @@ -158,44 +168,43 @@ func checkEpochContentLimit() { epochOrder = epochOrder[numEpochContentsToRemove:] for _, i := range epochToRemove { - delete(epochOrderMap, i) - } - - epochVotersWeightMutex.Lock() - for _, i := range epochToRemove { - delete(epochVotersWeight, i) - } - epochVotersWeightMutex.Unlock() - - committableEpochsMutex.Lock() - if len(committableEpochs) < maxEpochContentsToKeep { - committableEpochsMutex.Unlock() - return + epochOrderMap.Delete(i) + epochVotersWeight.Delete(i) + committableEpochs.Delete(i) } - committableEpochs = committableEpochs[len(committableEpochs)-maxEpochContentsToKeep:] - committableEpochsMutex.Unlock() } func GetCommittableEpochs() (ecRecords map[epoch.Index]*epoch.ECRecord) { - ecRecords = make(map[epoch.Index]*epoch.ECRecord, 0) - committableEpochsMutex.RLock() - for _, record := range committableEpochs { - ecRecords[record.EI()] = record - } - committableEpochsMutex.RUnlock() + defer committableEpochsMutex.RUnlock() + + ecRecords = make(map[epoch.Index]*epoch.ECRecord, committableEpochs.Size()) + committableEpochs.ForEach(func(ei epoch.Index, ecRecord *epoch.ECRecord) bool { + ecRecords[ei] = ecRecord + return true + }) return } +func GetEpochCommittment(ei epoch.Index) (ecRecord *epoch.ECRecord, exists bool) { + committableEpochsMutex.RLock() + defer committableEpochsMutex.RUnlock() + + return committableEpochs.Get(ei) +} + func GetPendingConflictCount() map[epoch.Index]uint64 { return deps.NotarizationMgr.PendingConflictsCountAll() } -func GetEpochblocks(ei epoch.Index) (blockIDs []tangleold.BlockID) { - prefix := append([]byte{database.PrefixEpochsStorage, prefixBlockIDs}, ei.Bytes()...) 
+func GetEpochBlockIDs(ei epoch.Index) (blockIDs []tangleold.BlockID) { + blockStore, err := baseStore.WithRealm(append([]byte{database.PrefixEpochsStorage, prefixBlockIDs}, ei.Bytes()...)) + if err != nil { + panic(err) + } - baseStore.IterateKeys(prefix, func(key kvstore.Key) bool { + blockStore.IterateKeys(kvstore.EmptyPrefix, func(key kvstore.Key) bool { var blockID tangleold.BlockID if _, err := blockID.FromBytes(key); err != nil { panic("BlockID could not be parsed!") @@ -203,13 +212,17 @@ func GetEpochblocks(ei epoch.Index) (blockIDs []tangleold.BlockID) { blockIDs = append(blockIDs, blockID) return true }) + return } func GetEpochTransactions(ei epoch.Index) (txIDs []utxo.TransactionID) { - prefix := append([]byte{database.PrefixEpochsStorage, prefixTransactionIDs}, ei.Bytes()...) + txStore, err := baseStore.WithRealm(append([]byte{database.PrefixEpochsStorage, prefixTransactionIDs}, ei.Bytes()...)) + if err != nil { + panic(err) + } - baseStore.IterateKeys(prefix, func(key kvstore.Key) bool { + txStore.IterateKeys(kvstore.EmptyPrefix, func(key kvstore.Key) bool { var txID utxo.TransactionID if _, err := txID.Decode(key); err != nil { panic("TransactionID could not be parsed!") @@ -222,10 +235,17 @@ func GetEpochTransactions(ei epoch.Index) (txIDs []utxo.TransactionID) { } func GetEpochUTXOs(ei epoch.Index) (spent, created []utxo.OutputID) { - createdPrefix := append([]byte{database.PrefixEpochsStorage, prefixCreatedOutput}, ei.Bytes()...) - spentPrefix := append([]byte{database.PrefixEpochsStorage, prefixSpentOutput}, ei.Bytes()...) 
+ spentStore, err := baseStore.WithRealm(append([]byte{database.PrefixEpochsStorage, prefixSpentOutput}, ei.Bytes()...)) + if err != nil { + panic(err) + } + + createdStore, err := baseStore.WithRealm(append([]byte{database.PrefixEpochsStorage, prefixCreatedOutput}, ei.Bytes()...)) + if err != nil { + panic(err) + } - baseStore.IterateKeys(spentPrefix, func(key kvstore.Key) bool { + spentStore.IterateKeys(kvstore.EmptyPrefix, func(key kvstore.Key) bool { var outputID utxo.OutputID if err := outputID.FromBytes(key); err != nil { panic(err) @@ -235,7 +255,7 @@ func GetEpochUTXOs(ei epoch.Index) (spent, created []utxo.OutputID) { return true }) - baseStore.IterateKeys(createdPrefix, func(key kvstore.Key) bool { + createdStore.IterateKeys(kvstore.EmptyPrefix, func(key kvstore.Key) bool { var outputID utxo.OutputID if err := outputID.FromBytes(key); err != nil { panic(err) @@ -250,12 +270,13 @@ func GetEpochUTXOs(ei epoch.Index) (spent, created []utxo.OutputID) { func GetEpochVotersWeight(ei epoch.Index) (weights map[epoch.ECR]map[identity.ID]float64) { epochVotersWeightMutex.RLock() defer epochVotersWeightMutex.RUnlock() - if _, ok := epochVotersWeight[ei]; !ok { + if _, ok := epochVotersWeight.Get(ei); !ok { return } - weights = make(map[epoch.ECR]map[identity.ID]float64, len(epochVotersWeight[ei])) - for ecr, voterWeights := range epochVotersWeight[ei] { + weights = make(map[epoch.ECR]map[identity.ID]float64, epochVotersWeight.Size()) + epochVoters, _ := epochVotersWeight.Get(ei) + for ecr, voterWeights := range epochVoters { subDuplicate := make(map[identity.ID]float64, len(voterWeights)) for id, w := range voterWeights { subDuplicate[id] = w @@ -265,38 +286,38 @@ func GetEpochVotersWeight(ei epoch.Index) (weights map[epoch.ECR]map[identity.ID return weights } -func insertblockToEpoch(ei epoch.Index, blkID tangleold.BlockID) error { +func insertBlockIntoEpoch(ei epoch.Index, blkID tangleold.BlockID) error { blockStore, err := 
baseStore.WithRealm(append([]byte{database.PrefixEpochsStorage, prefixBlockIDs}, ei.Bytes()...)) if err != nil { panic(err) } if err := blockStore.Set(blkID.Bytes(), blkID.Bytes()); err != nil { - return errors.New("Fail to insert block to epoch store") + return errors.New("fail to insert block to epoch store") } return nil } -func removeblockFromEpoch(ei epoch.Index, blkID tangleold.BlockID) error { +func removeBlockFromEpoch(ei epoch.Index, blkID tangleold.BlockID) error { blockStore, err := baseStore.WithRealm(append([]byte{database.PrefixEpochsStorage, prefixBlockIDs}, ei.Bytes()...)) if err != nil { panic(err) } if err := blockStore.Delete(blkID.Bytes()); err != nil { - return errors.New("Fail to remove block from epoch store") + return errors.New("fail to remove block from epoch store") } return nil } -func insertTransactionToEpoch(ei epoch.Index, txID utxo.TransactionID) error { +func insertTransactionIntoEpoch(ei epoch.Index, txID utxo.TransactionID) error { txStore, err := baseStore.WithRealm(append([]byte{database.PrefixEpochsStorage, prefixTransactionIDs}, ei.Bytes()...)) if err != nil { panic(err) } if err := txStore.Set(txID.Bytes(), txID.Bytes()); err != nil { - return errors.New("Fail to insert Transaction to epoch store") + return errors.New("fail to insert Transaction to epoch store") } return nil } @@ -308,12 +329,12 @@ func removeTransactionFromEpoch(ei epoch.Index, txID utxo.TransactionID) error { } if err := txStore.Delete(txID.Bytes()); err != nil { - return errors.New("Fail to remove Transaction from epoch store") + return errors.New("fail to remove Transaction from epoch store") } return nil } -func insertOutputsToEpoch(ei epoch.Index, spent, created []*ledger.OutputWithMetadata) error { +func insertOutputsIntoEpoch(ei epoch.Index, spent, created []*ledger.OutputWithMetadata) error { createdStore, err := baseStore.WithRealm(append([]byte{database.PrefixEpochsStorage, prefixCreatedOutput}, ei.Bytes()...)) if err != nil { panic(err) @@ -326,13 
+347,13 @@ func insertOutputsToEpoch(ei epoch.Index, spent, created []*ledger.OutputWithMet for _, s := range spent { if err := spentStore.Set(s.ID().Bytes(), s.ID().Bytes()); err != nil { - return errors.New("Fail to insert spent output to epoch store") + return errors.New("fail to insert spent output to epoch store") } } for _, c := range created { if err := createdStore.Set(c.ID().Bytes(), c.ID().Bytes()); err != nil { - return errors.New("Fail to insert created output to epoch store") + return errors.New("fail to insert created output to epoch store") } } @@ -352,13 +373,13 @@ func removeOutputsFromEpoch(ei epoch.Index, spent, created []*ledger.OutputWithM for _, s := range spent { if err := spentStore.Delete(s.ID().Bytes()); err != nil { - return errors.New("Fail to remove spent output from epoch store") + return errors.New("fail to remove spent output from epoch store") } } for _, c := range created { if err := createdStore.Delete(c.ID().Bytes()); err != nil { - return errors.New("Fail to remove created output from epoch store") + return errors.New("fail to remove created output from epoch store") } } @@ -371,23 +392,26 @@ func saveEpochVotersWeight(block *tangleold.Block) { epochVotersWeightMutex.Lock() defer epochVotersWeightMutex.Unlock() - epochIndex := block.M.EI - ecr := block.M.ECR - if _, ok := epochVotersWeight[epochIndex]; !ok { - epochVotersWeight[epochIndex] = make(map[epoch.ECR]map[identity.ID]float64) + epochIndex := block.ECRecordEI() + ecr := block.ECR() + if _, ok := epochVotersWeight.Get(epochIndex); !ok { + epochVotersWeight.Set(epochIndex, make(map[epoch.ECR]map[identity.ID]float64)) } - if _, ok := epochVotersWeight[epochIndex][ecr]; !ok { - epochVotersWeight[epochIndex][ecr] = make(map[identity.ID]float64) + epochVoters, _ := epochVotersWeight.Get(epochIndex) + if _, ok := epochVoters[ecr]; !ok { + epochVoters[ecr] = make(map[identity.ID]float64) } - vote, ok := epochVotersLatestVote[voter] + vote, ok := epochVotersLatestVote.Get(voter) 
if ok { - if vote.ei == epochIndex && vote.ecr != ecr && vote.issuedTime.Before(block.M.IssuingTime) { - delete(epochVotersWeight[vote.ei][vote.ecr], voter) + if vote.ei == epochIndex && vote.ecr != ecr && vote.issuedTime.Before(block.IssuingTime()) { + epochVoters, _ := epochVotersWeight.Get(vote.ei) + delete(epochVoters[vote.ecr], voter) } } - epochVotersLatestVote[voter] = &latestVote{ei: epochIndex, ecr: ecr, issuedTime: block.M.IssuingTime} - epochVotersWeight[epochIndex][ecr][voter] = activeWeights[voter] + + epochVotersLatestVote.Set(voter, &latestVote{ei: epochIndex, ecr: ecr, issuedTime: block.IssuingTime()}) + epochVoters[ecr][voter] = activeWeights[voter] } // region db prefixes ////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/plugins/faucet/connector.go b/plugins/faucet/connector.go new file mode 100644 index 0000000000..74573123f3 --- /dev/null +++ b/plugins/faucet/connector.go @@ -0,0 +1,102 @@ +package faucet + +import ( + "github.com/iotaledger/hive.go/core/types/confirmation" + "github.com/pkg/errors" + + "github.com/iotaledger/goshimmer/client/wallet" + "github.com/iotaledger/goshimmer/client/wallet/packages/address" + "github.com/iotaledger/goshimmer/packages/core/ledger" + "github.com/iotaledger/goshimmer/packages/core/ledger/utxo" + "github.com/iotaledger/goshimmer/packages/core/ledger/vm/devnetvm" + "github.com/iotaledger/goshimmer/packages/core/ledger/vm/devnetvm/indexer" + "github.com/iotaledger/goshimmer/packages/core/mana" + "github.com/iotaledger/goshimmer/packages/core/tangleold" + "github.com/iotaledger/goshimmer/plugins/blocklayer" +) + +type FaucetConnector struct { + tangle *tangleold.Tangle + indexer *indexer.Indexer +} + +func NewConnector(t *tangleold.Tangle, indexer *indexer.Indexer) *FaucetConnector { + return &FaucetConnector{ + tangle: t, + indexer: indexer, + } +} + +func (f *FaucetConnector) UnspentOutputs(addresses ...address.Address) (unspentOutputs 
wallet.OutputsByAddressAndOutputID, err error) { + unspentOutputs = make(map[address.Address]map[utxo.OutputID]*wallet.Output) + + for _, addr := range addresses { + f.indexer.CachedAddressOutputMappings(addr.Address()).Consume(func(mapping *indexer.AddressOutputMapping) { + f.tangle.Ledger.Storage.CachedOutput(mapping.OutputID()).Consume(func(output utxo.Output) { + if typedOutput, ok := output.(devnetvm.Output); ok { + f.tangle.Ledger.Storage.CachedOutputMetadata(typedOutput.ID()).Consume(func(outputMetadata *ledger.OutputMetadata) { + if !outputMetadata.IsSpent() { + walletOutput := &wallet.Output{ + Address: addr, + Object: typedOutput, + ConfirmationStateReached: outputMetadata.ConfirmationState().IsAccepted(), + Spent: false, + Metadata: wallet.OutputMetadata{ + Timestamp: outputMetadata.CreationTime(), + }, + } + + // store output in result + if _, addressExists := unspentOutputs[addr]; !addressExists { + unspentOutputs[addr] = make(map[utxo.OutputID]*wallet.Output) + } + unspentOutputs[addr][typedOutput.ID()] = walletOutput + } + }) + } + }) + }) + } + + return +} + +func (f *FaucetConnector) SendTransaction(tx *devnetvm.Transaction) (err error) { + // attach to block layer + issueTransaction := func() (*tangleold.Block, error) { + block, e := deps.Tangle.IssuePayload(tx) + if e != nil { + return nil, e + } + return block, nil + } + + _, err = blocklayer.AwaitBlockToBeBooked(issueTransaction, tx.ID(), Parameters.MaxTransactionBookedAwaitTime) + if err != nil { + return errors.Errorf("%v: tx %s", err, tx.ID().String()) + } + return nil +} + +func (f *FaucetConnector) RequestFaucetFunds(address address.Address, powTarget int) (err error) { + panic("RequestFaucetFunds is not implemented in faucet connector.") +} + +func (f *FaucetConnector) GetAllowedPledgeIDs() (pledgeIDMap map[mana.Type][]string, err error) { + pledgeIDMap = make(map[mana.Type][]string) + pledgeIDMap[mana.AccessMana] = []string{deps.Local.ID().EncodeBase58()} + 
pledgeIDMap[mana.ConsensusMana] = []string{deps.Local.ID().EncodeBase58()} + + return +} + +func (f *FaucetConnector) GetTransactionConfirmationState(txID utxo.TransactionID) (confirmationState confirmation.State, err error) { + f.tangle.Ledger.Storage.CachedTransactionMetadata(txID).Consume(func(tm *ledger.TransactionMetadata) { + confirmationState = tm.ConfirmationState() + }) + return +} + +func (f *FaucetConnector) GetUnspentAliasOutput(address *devnetvm.AliasAddress) (output *devnetvm.AliasOutput, err error) { + panic("GetUnspentAliasOutput is not implemented in faucet connector.") +} diff --git a/plugins/faucet/faucet.go b/plugins/faucet/faucet.go new file mode 100644 index 0000000000..a3c0825e36 --- /dev/null +++ b/plugins/faucet/faucet.go @@ -0,0 +1,82 @@ +package faucet + +import ( + "context" + "time" + + "github.com/iotaledger/hive.go/core/bitmask" + "github.com/iotaledger/hive.go/core/identity" + + "github.com/iotaledger/goshimmer/client/wallet" + "github.com/iotaledger/goshimmer/client/wallet/packages/address" + "github.com/iotaledger/goshimmer/client/wallet/packages/seed" + "github.com/iotaledger/goshimmer/client/wallet/packages/sendoptions" + "github.com/iotaledger/goshimmer/packages/app/faucet" + "github.com/iotaledger/goshimmer/packages/core/ledger/vm/devnetvm" +) + +type Faucet struct { + *wallet.Wallet +} + +// NewFaucet creates a new Faucet instance. +func NewFaucet(faucetSeed *seed.Seed) (f *Faucet) { + connector := NewConnector(deps.Tangle, deps.Indexer) + + f = &Faucet{wallet.New( + wallet.GenericConnector(connector), + wallet.Import(faucetSeed, 0, []bitmask.BitMask{}, nil), + wallet.ReusableAddress(true), + wallet.FaucetPowDifficulty(Parameters.PowDifficulty), + wallet.ConfirmationTimeout(Parameters.MaxAwait), + wallet.ConfirmationPollingInterval(500*time.Millisecond), + )} + // We use index 1 as a proxy address from which we send the funds to the requester. 
+ f.Wallet.NewReceiveAddress() + + return f +} + +// Start starts the faucet to fulfill faucet requests. +func (f *Faucet) Start(ctx context.Context, requestChan <-chan *faucet.Payload) { + for { + select { + case p := <-requestChan: + tx, err := f.handleFaucetRequest(p, ctx) + if err != nil { + Plugin.LogErrorf("fail to send funds to %s: %v", p.Address().Base58(), err) + continue + } + Plugin.LogInfof("sent funds to %s: TXID: %s", p.Address().Base58(), tx.ID().Base58()) + + case <-ctx.Done(): + return + } + } +} + +// handleFaucetRequest sends funds to the requested address and waits for the transaction to become accepted. +func (f *Faucet) handleFaucetRequest(p *faucet.Payload, ctx context.Context) (*devnetvm.Transaction, error) { + _, err := f.SendFunds( + sendoptions.Sources(f.Seed().Address(0)), // we only reuse the address at index 0 for the wallet + sendoptions.Destination(f.Seed().Address(1), uint64(Parameters.TokensPerRequest)), // we send the funds to address at index 1 so that we can be sure the correct output is sent to a requester + sendoptions.AccessManaPledgeID(deps.Local.ID().EncodeBase58()), + sendoptions.ConsensusManaPledgeID(identity.ID{}.EncodeBase58()), + sendoptions.WaitForConfirmation(true), + sendoptions.Context(ctx), + ) + if err != nil { + return nil, err + } + + // send funds to requester + tx, err := f.SendFunds( + sendoptions.Sources(f.Seed().Address(1)), + sendoptions.Destination(address.Address{AddressBytes: p.Address().Array()}, uint64(Parameters.TokensPerRequest)), + sendoptions.AccessManaPledgeID(p.AccessManaPledgeID().EncodeBase58()), + sendoptions.ConsensusManaPledgeID(p.ConsensusManaPledgeID().EncodeBase58()), + sendoptions.WaitForConfirmation(true), + sendoptions.Context(ctx), + ) + return tx, err +} diff --git a/plugins/faucet/parameters.go b/plugins/faucet/parameters.go index 0c7e2e9347..d026507b6e 100644 --- a/plugins/faucet/parameters.go +++ b/plugins/faucet/parameters.go @@ -21,19 +21,8 @@ type ParametersDefinition struct 
{ // PowDifficulty defines the PoW difficulty for faucet payloads. PowDifficulty int `default:"22" usage:"defines the PoW difficulty for faucet payloads"` - // BlacklistCapacity holds the maximum amount the address blacklist holds. - // An address for which a funding was done in the past is added to the blacklist and eventually is removed from it. - BlacklistCapacity int `default:"10000" usage:"holds the maximum amount the address blacklist holds"` - - // SupplyOutputsCount is the number of supply outputs, and splitting transactions accordingly, the faucet prepares. - SupplyOutputsCount int `default:"20" usage:"the number of supply outputs, and splitting transactions accordingly, the faucet prepares."` - - // SplittingMultiplier defines how many outputs each splitting transaction will have. - // SplittingMultiplier * SupplyOutputsCount indicates how many funding outputs during funds replenishment. - SplittingMultiplier int `default:"25" usage:"SplittingMultiplier defines how many outputs each supply transaction will have."` - - // GenesisTokenAmount is the total supply. - GenesisTokenAmount uint64 `default:"1000000000000000" usage:"GenesisTokenAmount is the total supply."` + // MaxAwait defines the maximum time to wait for a transaction to be accepted. + MaxAwait time.Duration `default:"60s" usage:"the maximum time to wait for a transaction to be accepted"` } // Parameters contains the configuration parameters of the faucet plugin. 
diff --git a/plugins/faucet/plugin.go b/plugins/faucet/plugin.go index 538990f7c5..541dd18362 100644 --- a/plugins/faucet/plugin.go +++ b/plugins/faucet/plugin.go @@ -2,18 +2,14 @@ package faucet import ( "context" - "runtime" - "sync" "time" "github.com/cockroachdb/errors" "github.com/iotaledger/hive.go/core/autopeering/peer" "github.com/iotaledger/hive.go/core/daemon" "github.com/iotaledger/hive.go/core/generics/event" - "github.com/iotaledger/hive.go/core/generics/orderedmap" "github.com/iotaledger/hive.go/core/identity" "github.com/iotaledger/hive.go/core/node" - "github.com/iotaledger/hive.go/core/workerpool" "github.com/mr-tron/base58" "go.uber.org/atomic" "go.uber.org/dig" @@ -38,20 +34,13 @@ const ( var ( // Plugin is the "plugin" instance of the faucet application. - Plugin *node.Plugin - _faucet *StateManager - powVerifier = pow.New() - fundingWorkerPool *workerpool.NonBlockingQueuedWorkerPool - fundingWorkerCount = runtime.GOMAXPROCS(0) - fundingWorkerQueueSize = 500 - preparingWorkerPool *workerpool.NonBlockingQueuedWorkerPool - preparingWorkerCount = runtime.GOMAXPROCS(0) - preparingWorkerQueueSize = MaxFaucetOutputsCount + 1 - targetPoWDifficulty int - // blacklist makes sure that an address might only request tokens once. - blacklist *orderedmap.OrderedMap[string, bool] - blacklistCapacity int - blackListMutex sync.RWMutex + Plugin *node.Plugin + _faucet *Faucet + powVerifier = pow.New() + requestChanSize = 300 + requestChan = make(chan *faucet.Payload, requestChanSize) + targetPoWDifficulty int + // signals that the faucet has initialized itself and can start funding requests. initDone atomic.Bool bootstrapped chan bool @@ -74,7 +63,7 @@ func init() { } // newFaucet gets the faucet component instance the faucet plugin has initialized. 
-func newFaucet() *StateManager { +func newFaucet() *Faucet { if Parameters.Seed == "" { Plugin.LogFatalAndExit("a seed must be defined when enabling the faucet plugin") } @@ -88,46 +77,12 @@ func newFaucet() *StateManager { if Parameters.MaxTransactionBookedAwaitTime <= 0 { Plugin.LogFatalfAndExit("the max transaction booked await time must be more than 0") } - if Parameters.SupplyOutputsCount <= 0 { - Plugin.LogFatalfAndExit("the number of faucet supply outputs should be more than 0") - } - if Parameters.SplittingMultiplier <= 0 { - Plugin.LogFatalfAndExit("the number of outputs for each supply transaction during funds splitting should be more than 0") - } - if Parameters.GenesisTokenAmount <= 0 { - Plugin.LogFatalfAndExit("the total supply should be more than 0") - } - return NewStateManager( - uint64(Parameters.TokensPerRequest), - walletseed.NewSeed(seedBytes), - uint64(Parameters.SupplyOutputsCount), - uint64(Parameters.SplittingMultiplier), - - Parameters.MaxTransactionBookedAwaitTime, - ) + + return NewFaucet(walletseed.NewSeed(seedBytes)) } func configure(plugin *node.Plugin) { targetPoWDifficulty = Parameters.PowDifficulty - blacklist = orderedmap.New[string, bool]() - blacklistCapacity = Parameters.BlacklistCapacity - _faucet = newFaucet() - - fundingWorkerPool = workerpool.NewNonBlockingQueuedWorkerPool(func(task workerpool.Task) { - faucetRequest := task.Param(0).(*faucet.Payload) - addr := faucetRequest.Address() - - blk, txID, err := _faucet.FulFillFundingRequest(faucetRequest) - if err != nil { - plugin.LogWarnf("couldn't fulfill funding request to %s: %s", addr.Base58(), err) - return - } - plugin.LogInfof("sent funds to address %s via tx %s and blk %s", addr.Base58(), txID, blk.ID()) - }, workerpool.WorkerCount(fundingWorkerCount), workerpool.QueueSize(fundingWorkerQueueSize)) - - preparingWorkerPool = workerpool.NewNonBlockingQueuedWorkerPool(_faucet.prepareTransactionTask, - workerpool.WorkerCount(preparingWorkerCount), 
workerpool.QueueSize(preparingWorkerQueueSize)) - bootstrapped = make(chan bool, 1) configureEvents() @@ -144,28 +99,18 @@ func run(plugin *node.Plugin) { plugin.LogInfo("Waiting for node to become bootstrapped... done") plugin.LogInfo("Waiting for node to have sufficient access mana") - if err := waitForMana(ctx); err != nil { + if err := checkForMana(ctx); err != nil { plugin.LogErrorf("failed to get sufficient access mana: %s", err) return } plugin.LogInfo("Waiting for node to have sufficient access mana... done") - plugin.LogInfof("Deriving faucet state from the ledger...") - - // determine state, prepare more outputs if needed - if err := _faucet.DeriveStateFromTangle(ctx); err != nil { - plugin.LogErrorf("failed to derive state: %s", err) - return - } - plugin.LogInfo("Deriving faucet state from the ledger... done") - - defer fundingWorkerPool.Stop() - defer preparingWorkerPool.Stop() - initDone.Store(true) - <-ctx.Done() - plugin.LogInfof("Stopping %s ...", PluginName) + _faucet = newFaucet() + _faucet.Start(ctx, requestChan) + + close(requestChan) }, shutdown.PriorityFaucet); err != nil { plugin.Logger().Panicf("Failed to start daemon: %s", err) } @@ -186,27 +131,18 @@ func waitUntilBootstrapped(ctx context.Context) bool { } } -func waitForMana(ctx context.Context) error { +func checkForMana(ctx context.Context) error { nodeID := deps.Tangle.Options.Identity.ID() - for { - // stop polling, if we are shutting down - select { - case <-ctx.Done(): - return errors.New("faucet shutting down") - default: - } - aMana, _, err := blocklayer.GetAccessMana(nodeID) - // ignore ErrNodeNotFoundInBaseManaVector and treat it as 0 mana - if err != nil && !errors.Is(err, mana.ErrNodeNotFoundInBaseManaVector) { - return err - } - if aMana >= tangleold.MinMana { - return nil - } - Plugin.LogDebugf("insufficient access mana: %f < %f", aMana, tangleold.MinMana) - time.Sleep(waitForManaWindow) + aMana, _, err := blocklayer.GetAccessMana(nodeID) + // ignore 
ErrNodeNotFoundInBaseManaVector and treat it as 0 mana + if err != nil && !errors.Is(err, mana.ErrNodeNotFoundInBaseManaVector) { + return err + } + if aMana < tangleold.MinMana { + return errors.Errorf("insufficient access mana: %f < %f", aMana, tangleold.MinMana) } + return nil } func configureEvents() { @@ -271,11 +207,6 @@ func handleFaucetRequest(fundingRequest *faucet.Payload, pledge ...identity.ID) return errors.New("PoW requirement is not satisfied") } - if IsAddressBlackListed(addr) { - Plugin.LogInfof("can't fund address %s since it is blacklisted", addr.Base58()) - return errors.Newf("can't fund address %s since it is blacklisted %s", addr.Base58()) - } - emptyID := identity.ID{} if len(pledge) == 2 { if fundingRequest.AccessManaPledgeID() == emptyID { @@ -287,12 +218,7 @@ func handleFaucetRequest(fundingRequest *faucet.Payload, pledge ...identity.ID) } // finally add it to the faucet to be processed - _, added := fundingWorkerPool.TrySubmit(fundingRequest) - if !added { - RemoveAddressFromBlacklist(addr) - Plugin.LogInfof("dropped funding request for address %s as queue is full", addr.Base58()) - return errors.Newf("dropped funding request for address %s as queue is full", addr.Base58()) - } + requestChan <- fundingRequest Plugin.LogInfof("enqueued funding request for address %s", addr.Base58()) return nil @@ -317,38 +243,3 @@ func isFaucetRequestPoWValid(fundingRequest *faucet.Payload, addr devnetvm.Addre return true } - -// IsAddressBlackListed returns if an address is blacklisted. -// adds the given address to the blacklist and removes the oldest blacklist entry if it would go over capacity. 
-func IsAddressBlackListed(address devnetvm.Address) bool { - blackListMutex.Lock() - defer blackListMutex.Unlock() - - // see if it was already blacklisted - _, blacklisted := blacklist.Get(address.Base58()) - - if blacklisted { - return true - } - - // add it to the blacklist - blacklist.Set(address.Base58(), true) - if blacklist.Size() > blacklistCapacity { - var headKey string - blacklist.ForEach(func(key string, value bool) bool { - headKey = key - return false - }) - blacklist.Delete(headKey) - } - - return false -} - -// RemoveAddressFromBlacklist removes an address from the blacklist. -func RemoveAddressFromBlacklist(address devnetvm.Address) { - blackListMutex.Lock() - defer blackListMutex.Unlock() - - blacklist.Delete(address.Base58()) -} diff --git a/plugins/faucet/state_manager.go b/plugins/faucet/state_manager.go deleted file mode 100644 index 7a3f41cdad..0000000000 --- a/plugins/faucet/state_manager.go +++ /dev/null @@ -1,977 +0,0 @@ -package faucet - -import ( - "container/list" - "context" - "sync" - "time" - - "github.com/cockroachdb/errors" - "github.com/iotaledger/hive.go/core/crypto/ed25519" - "github.com/iotaledger/hive.go/core/generics/event" - "github.com/iotaledger/hive.go/core/generics/lo" - "github.com/iotaledger/hive.go/core/identity" - "github.com/iotaledger/hive.go/core/types" - "github.com/iotaledger/hive.go/core/typeutils" - "github.com/iotaledger/hive.go/core/workerpool" - "go.uber.org/atomic" - - walletseed "github.com/iotaledger/goshimmer/client/wallet/packages/seed" - "github.com/iotaledger/goshimmer/packages/app/faucet" - "github.com/iotaledger/goshimmer/packages/core/ledger" - "github.com/iotaledger/goshimmer/packages/core/ledger/utxo" - "github.com/iotaledger/goshimmer/packages/core/ledger/vm/devnetvm" - "github.com/iotaledger/goshimmer/packages/core/ledger/vm/devnetvm/indexer" - "github.com/iotaledger/goshimmer/packages/core/tangleold" - "github.com/iotaledger/goshimmer/packages/node/clock" - 
"github.com/iotaledger/goshimmer/plugins/blocklayer" -) - -const ( - // RemainderAddressIndex is the RemainderAddressIndex. - RemainderAddressIndex = 0 - - // MinFundingOutputsPercentage defines the min percentage of prepared funding outputs left that triggers a replenishment. - MinFundingOutputsPercentage = 0.3 - - // MaxFaucetOutputsCount defines the max outputs count for the Faucet as the ledgerstate.MaxOutputCount -1 remainder output. - MaxFaucetOutputsCount = devnetvm.MaxOutputCount - 1 - - // WaitForAcceptance defines the wait time before considering a transaction confirmed. - WaitForAcceptance = 10 * time.Second - - // MaxWaitAttempts defines the number of attempts taken while waiting for confirmation during funds preparation. - MaxWaitAttempts = 50 - - // minFaucetBalanceMultiplier defines the multiplier for the min token amount required, before the faucet stops operating. - minFaucetBalanceMultiplier = 0.1 -) - -// FaucetOutput represents an output controlled by the faucet. -type FaucetOutput struct { - ID utxo.OutputID - Balance uint64 - Address devnetvm.Address - AddressIndex uint64 -} - -// StateManager manages the funds and outputs of the faucet. Can derive its state from a synchronized Tangle, can -// carry out funding requests, and prepares more funding outputs when needed. 
-type StateManager struct { - // the amount of tokens to send to every request - tokensPerRequest uint64 - // number of supply outputs to generate per replenishment that will be split further into funding outputs - targetSupplyOutputsCount uint64 - // number of funding outputs to generate per batch - targetFundingOutputsCount uint64 - // the threshold of remaining available funding outputs under which the faucet starts to replenish new funding outputs - replenishThreshold float64 - // number of funding outputs to generate per supply output in a supply transaction during the splitting period - splittingMultiplier uint64 - // the number of tokens on each supply output - tokensPerSupplyOutput uint64 - // the amount of tokens a supply replenishment will deduct from the faucet remainder - tokensUsedOnSupplyReplenishment uint64 - - // the time to await for the transaction fulfilling a funding request - // to become booked in the value layer - maxTxBookedAwaitTime time.Duration - - // fundingState serves fundingOutputs and its mutex - fundingState *fundingState - - // replenishmentState keeps all variables and related methods used to track faucet state during funds replenishment - replenishmentState *replenishmentState - - // splittingEnv keeps all variables and related methods necessary to split transactions during funds replenishment - splittingEnv *splittingEnv - - // signal received from Faucet background worker on shutdown - shutdownSignal <-chan struct{} -} - -// NewStateManager creates a new state manager for the faucet. -func NewStateManager( - tokensPerRequest uint64, - seed *walletseed.Seed, - supplyOutputsCount uint64, - splittingMultiplier uint64, - maxTxBookedTime time.Duration, -) *StateManager { - // the max number of outputs in a tx is 127, therefore, when creating the splitting tx, we can have at most - // 126 prepared outputs (+1 remainder output). 
- if supplyOutputsCount > MaxFaucetOutputsCount { - supplyOutputsCount = MaxFaucetOutputsCount - } - // number of outputs for each split supply transaction is also limited by the max num of outputs - if splittingMultiplier > MaxFaucetOutputsCount { - splittingMultiplier = MaxFaucetOutputsCount - } - - fState := newFundingState() - pState := newPreparingState(seed) - - res := &StateManager{ - tokensPerRequest: tokensPerRequest, - targetSupplyOutputsCount: supplyOutputsCount, - targetFundingOutputsCount: supplyOutputsCount * splittingMultiplier, - replenishThreshold: float64(supplyOutputsCount*splittingMultiplier) * MinFundingOutputsPercentage, - splittingMultiplier: splittingMultiplier, - maxTxBookedAwaitTime: maxTxBookedTime, - tokensPerSupplyOutput: tokensPerRequest * splittingMultiplier, - tokensUsedOnSupplyReplenishment: tokensPerRequest * splittingMultiplier * supplyOutputsCount, - - fundingState: fState, - replenishmentState: pState, - } - - return res -} - -// DeriveStateFromTangle derives the faucet state from a synchronized Tangle. -// - remainder output should always sit on address 0. -// - supply outputs should be held on address indices 1-126 -// - funding outputs start from address index 127 -// - if no funding outputs are found, the faucet creates them from the remainder output. 
-func (s *StateManager) DeriveStateFromTangle(ctx context.Context) (err error) { - s.replenishmentState.IsReplenishing.Set() - defer s.replenishmentState.IsReplenishing.UnSet() - - if err = s.findUnspentRemainderOutput(); err != nil { - return - } - - endIndex := (Parameters.GenesisTokenAmount-s.replenishmentState.RemainderOutputBalance())/s.tokensPerRequest + MaxFaucetOutputsCount - Plugin.LogInfof("Set last funding output address index to %d (%d outputs have been prepared in the faucet's lifetime)", endIndex, endIndex-MaxFaucetOutputsCount) - - s.replenishmentState.SetLastFundingOutputAddressIndex(endIndex) - - // check for any unfinished replenishments and use all available supply outputs - if supplyOutputsFound := s.findSupplyOutputs(); supplyOutputsFound > 0 { - Plugin.LogInfof("Found %d available supply outputs", s.replenishmentState.SupplyOutputsCount()) - Plugin.LogInfo("Will replenish funding outputs with them...") - if err = s.replenishFundingOutputs(); err != nil { - return - } - } - foundFundingOutputs := s.findFundingOutputs() - - if len(foundFundingOutputs) != 0 { - // save all already prepared outputs into the state manager - Plugin.LogInfof("Found and restored %d funding outputs", len(foundFundingOutputs)) - s.saveFundingOutputs(foundFundingOutputs) - } - - if s.replenishThresholdReached() { - Plugin.LogInfof("Preparing more funding outputs...") - if err = s.handleReplenishmentErrors(s.replenishSupplyAndFundingOutputs()); err != nil { - return - } - } - - Plugin.LogInfof("Added new funding outputs, last used address index is %d", s.replenishmentState.GetLastFundingOutputAddressIndex()) - Plugin.LogInfof("There are currently %d funding outputs available", s.fundingState.FundingOutputsCount()) - Plugin.LogInfof("Remainder output %s has %d tokens available", s.replenishmentState.RemainderOutputID().Base58(), s.replenishmentState.RemainderOutputBalance()) - - return err -} - -// FulFillFundingRequest fulfills a faucet request by spending the next 
funding output to the requested address. -// Mana of the transaction is pledged to the requesting node. -func (s *StateManager) FulFillFundingRequest(faucetReq *faucet.Payload) (*tangleold.Block, string, error) { - if s.replenishThresholdReached() { - // wait for replenishment to finish if there is no funding outputs prepared - waitForPreparation := s.fundingState.FundingOutputsCount() == 0 - s.signalReplenishmentNeeded(waitForPreparation) - } - - // get an output that we can spend - fundingOutput, fErr := s.fundingState.GetFundingOutput() - // we don't have funding outputs - if errors.Is(fErr, ErrNotEnoughFundingOutputs) { - err := errors.Errorf("failed to gather funding outputs: %w", fErr) - return nil, "", err - } - - tx := s.prepareFaucetTransaction(faucetReq.Address(), fundingOutput, faucetReq.AccessManaPledgeID(), faucetReq.ConsensusManaPledgeID()) - - // issue funding request - m, err := s.issueTx(tx) - if err != nil { - return nil, "", err - } - txID := tx.ID().Base58() - - return m, txID, nil -} - -// replenishThresholdReached checks if the replenishment threshold is reached by examining the available -// funding outputs count against the wanted target funding outputs count. -func (s *StateManager) replenishThresholdReached() bool { - return uint64(s.fundingState.FundingOutputsCount()) < uint64(s.replenishThreshold) -} - -// signalReplenishmentNeeded triggers a replenishment of funding outputs if none is currently running. -// if wait is true it awaits for funds to be prepared to not drop requests and block the queue. 
-func (s *StateManager) signalReplenishmentNeeded(wait bool) { - if s.replenishmentState.IsReplenishing.SetToIf(false, true) { - go func() { - Plugin.LogInfof("Preparing more funding outputs due to replenishment threshold reached...") - _ = s.handleReplenishmentErrors(s.replenishSupplyAndFundingOutputs()) - }() - } - // waits until preparation of funds will finish - if wait { - s.replenishmentState.Wait() - } -} - -// prepareFaucetTransaction prepares a funding faucet transaction that spends fundingOutput to destAddr and pledges -// mana to pledgeID. -func (s *StateManager) prepareFaucetTransaction(destAddr devnetvm.Address, fundingOutput *FaucetOutput, accessManaPledgeID, consensusManaPledgeID identity.ID) (tx *devnetvm.Transaction) { - inputs := devnetvm.NewInputs(devnetvm.NewUTXOInput(fundingOutput.ID)) - - outputs := devnetvm.NewOutputs(devnetvm.NewSigLockedColoredOutput( - devnetvm.NewColoredBalances( - map[devnetvm.Color]uint64{ - devnetvm.ColorIOTA: s.tokensPerRequest, - }), - destAddr, - )) - - essence := devnetvm.NewTransactionEssence( - 0, - clock.SyncedTime(), - accessManaPledgeID, - consensusManaPledgeID, - devnetvm.NewInputs(inputs...), - devnetvm.NewOutputs(outputs...), - ) - - w := wallet{keyPair: *s.replenishmentState.seed.KeyPair(fundingOutput.AddressIndex)} - unlockBlock := devnetvm.NewSignatureUnlockBlock(w.sign(essence)) - - tx = devnetvm.NewTransaction( - essence, - devnetvm.UnlockBlocks{unlockBlock}, - ) - return -} - -// saveFundingOutputs saves the given slice of indices in StateManager and updates lastFundingOutputAddressIndex. -func (s *StateManager) saveFundingOutputs(fundingOutputs []*FaucetOutput) { - for _, fOutput := range fundingOutputs { - s.fundingState.FundingOutputsAdd(fOutput) - } -} - -// findFundingOutputs looks for funding outputs. 
-func (s *StateManager) findFundingOutputs() []*FaucetOutput { - foundPreparedOutputs := make([]*FaucetOutput, 0) - - var start, end uint64 - end = s.replenishmentState.GetLastFundingOutputAddressIndex() - if start = end - s.targetFundingOutputsCount; start <= MaxFaucetOutputsCount { - start = MaxFaucetOutputsCount + 1 - } - - if start >= end { - Plugin.LogInfof("No need to search for existing funding outputs, since the faucet is freshly initialized") - return foundPreparedOutputs - } - - Plugin.LogInfof("Looking for existing funding outputs in address range %d to %d...", start, end) - - for i := start; i <= end; i++ { - deps.Indexer.CachedAddressOutputMappings(s.replenishmentState.seed.Address(i).Address()).Consume(func(mapping *indexer.AddressOutputMapping) { - deps.Tangle.Ledger.Storage.CachedOutput(mapping.OutputID()).Consume(func(output utxo.Output) { - deps.Tangle.Ledger.Storage.CachedOutputMetadata(output.ID()).Consume(func(outputMetadata *ledger.OutputMetadata) { - if !outputMetadata.IsSpent() { - outputEssence := output.(devnetvm.Output) - - iotaBalance, colorExist := outputEssence.Balances().Get(devnetvm.ColorIOTA) - if !colorExist { - return - } - if iotaBalance == s.tokensPerRequest { - // we found a prepared output - foundPreparedOutputs = append(foundPreparedOutputs, &FaucetOutput{ - ID: output.ID(), - Balance: iotaBalance, - Address: outputEssence.Address(), - AddressIndex: i, - }) - } - } - }) - }) - }) - } - - Plugin.LogInfof("Found %d funding outputs in the Tangle", len(foundPreparedOutputs)) - Plugin.LogInfof("Looking for funding outputs in the Tangle... DONE") - return foundPreparedOutputs -} - -// findUnspentRemainderOutput finds the remainder output and updates the state manager. 
-func (s *StateManager) findUnspentRemainderOutput() error { - var foundRemainderOutput *FaucetOutput - - remainderAddress := s.replenishmentState.seed.Address(RemainderAddressIndex).Address() - - // remainder output should sit on address 0 - deps.Indexer.CachedAddressOutputMappings(remainderAddress).Consume(func(mapping *indexer.AddressOutputMapping) { - deps.Tangle.Ledger.Storage.CachedOutput(mapping.OutputID()).Consume(func(output utxo.Output) { - deps.Tangle.Ledger.Storage.CachedOutputMetadata(output.ID()).Consume(func(outputMetadata *ledger.OutputMetadata) { - if !outputMetadata.IsSpent() && deps.Tangle.Ledger.Utils.OutputConfirmationState(outputMetadata.ID()).IsAccepted() { - outputEssence := output.(devnetvm.Output) - - iotaBalance, ok := outputEssence.Balances().Get(devnetvm.ColorIOTA) - if !ok || iotaBalance < uint64(minFaucetBalanceMultiplier*float64(Parameters.GenesisTokenAmount)) { - return - } - if foundRemainderOutput != nil && iotaBalance < foundRemainderOutput.Balance { - // when multiple "big" unspent outputs sit on this address, take the biggest one - return - } - foundRemainderOutput = &FaucetOutput{ - ID: outputEssence.ID(), - Balance: iotaBalance, - Address: outputEssence.Address(), - AddressIndex: RemainderAddressIndex, - } - } - }) - }) - }) - if foundRemainderOutput == nil { - return errors.Errorf("can't find an output on address %s that has at least %d tokens", remainderAddress.Base58(), int(minFaucetBalanceMultiplier*float64(Parameters.GenesisTokenAmount))) - } - s.replenishmentState.SetRemainderOutput(foundRemainderOutput) - - return nil -} - -// findSupplyOutputs looks for targetSupplyOutputsCount number of outputs and updates the StateManager. 
-func (s *StateManager) findSupplyOutputs() uint64 { - var foundSupplyCount uint64 - var foundOnCurrentAddress bool - - // supply outputs should sit on addresses 1-126 - for supplyAddr := uint64(1); supplyAddr < MaxFaucetOutputsCount+1; supplyAddr++ { - supplyAddress := s.replenishmentState.seed.Address(supplyAddr).Address() - // make sure only one output per address will be added - foundOnCurrentAddress = false - - deps.Indexer.CachedAddressOutputMappings(supplyAddress).Consume(func(mapping *indexer.AddressOutputMapping) { - deps.Tangle.Ledger.Storage.CachedOutput(mapping.OutputID()).Consume(func(output utxo.Output) { - if foundOnCurrentAddress { - return - } - if deps.Tangle.Utils.ConfirmedConsumer(output.ID()) == utxo.EmptyTransactionID && - deps.Tangle.Ledger.Utils.OutputConfirmationState(output.ID()).IsAccepted() { - outputEssence := output.(devnetvm.Output) - - iotaBalance, ok := outputEssence.Balances().Get(devnetvm.ColorIOTA) - if !ok || iotaBalance != s.tokensPerSupplyOutput { - return - } - supplyOutput := &FaucetOutput{ - ID: outputEssence.ID(), - Balance: iotaBalance, - Address: outputEssence.Address(), - AddressIndex: supplyAddr, - } - s.replenishmentState.AddSupplyOutput(supplyOutput) - foundSupplyCount++ - foundOnCurrentAddress = true - } - }) - }) - } - - return foundSupplyCount -} - -// replenishSupplyAndFundingOutputs create a supply transaction splitting up the remainder output to targetSupplyOutputsCount outputs plus a new remainder output. -// After the supply transaction is confirmed it uses each supply output and splits it for splittingMultiplier many times to generate funding outputs. -// After confirmation of each splitting transaction, outputs are added to fundingOutputs list. -// The faucet remainder is stored on address 0. Next 126 indexes are reserved for supply outputs. 
-func (s *StateManager) replenishSupplyAndFundingOutputs() (err error) { - s.replenishmentState.WaitGroup.Add(1) - defer s.replenishmentState.WaitGroup.Done() - - defer s.replenishmentState.IsReplenishing.UnSet() - - if err = s.findUnspentRemainderOutput(); err != nil { - return errors.Errorf("%w: %w", ErrMissingRemainderOutput, err) - } - - if !s.enoughFundsForSupplyReplenishment() { - err = ErrNotEnoughFunds - return - } - - if err = s.replenishSupplyOutputs(); err != nil { - return errors.Errorf("%w: %w", ErrSupplyPreparationFailed, err) - } - - if err = s.replenishFundingOutputs(); errors.Is(err, ErrSplittingFundsFailed) { - return err - } - return nil -} - -func (s *StateManager) handleReplenishmentErrors(err error) error { - if err != nil { - if errors.Is(err, ErrSplittingFundsFailed) { - err = errors.Errorf("failed to prepare more funding outputs: %w", err) - Plugin.LogError(err) - return err - } - if errors.Is(err, ErrConfirmationTimeoutExpired) { - Plugin.LogInfof("Preparing more funding outputs partially successful: %w", err) - } - } - Plugin.LogInfof("Preparing more outputs... DONE") - return err -} - -// enoughFundsForSupplyReplenishment indicates if there are enough funds left to commence a supply replenishment. -func (s *StateManager) enoughFundsForSupplyReplenishment() bool { - return s.replenishmentState.RemainderOutputBalance() >= s.tokensUsedOnSupplyReplenishment -} - -// replenishSupplyOutputs takes the faucet remainder output and splits it up to create supply outputs that will be used for replenishing the funding outputs. 
-func (s *StateManager) replenishSupplyOutputs() (err error) { - errChan := make(chan error) - listenerAttachedChan := make(chan types.Empty) - s.splittingEnv = newSplittingEnv() - - go s.updateStateOnConfirmation(1, errChan, listenerAttachedChan) - <-listenerAttachedChan - if _, ok := preparingWorkerPool.TrySubmit(s.supplyTransactionElements, errChan); !ok { - Plugin.LogWarn("supply replenishment task not submitted, queue is full") - } - - // wait for updateStateOnConfirmation to return - return <-s.splittingEnv.listeningFinished -} - -// prepareTransactionTask function for preparation workerPool that uses: provided callback function (param 0) -// to create either supply or split transaction, error channel (param 1) to signal failure during preparation -// or issuance and decrement number of expected confirmations. -func (s *StateManager) prepareTransactionTask(task workerpool.Task) { - transactionElementsCallback := task.Param(0).(func() (inputs devnetvm.Inputs, outputs devnetvm.Outputs, w wallet, err error)) - preparationFailed := task.Param(1).(chan error) - - tx, err := s.createSplittingTx(transactionElementsCallback) - if err != nil { - preparationFailed <- err - return - } - - s.splittingEnv.AddIssuedTxID(tx.ID()) - if _, err = s.issueTx(tx); err != nil { - preparationFailed <- err - return - } -} - -// replenishFundingOutputs splits available supply outputs to funding outputs. -// It listens for transaction confirmation and in parallel submits transaction preparation and issuance to the worker pool. 
-func (s *StateManager) replenishFundingOutputs() (err error) { - errChan := make(chan error) - listenerAttachedChan := make(chan types.Empty) - supplyToProcess := uint64(s.replenishmentState.SupplyOutputsCount()) - s.splittingEnv = newSplittingEnv() - - go s.updateStateOnConfirmation(supplyToProcess, errChan, listenerAttachedChan) - <-listenerAttachedChan - - for i := uint64(0); i < supplyToProcess; i++ { - if _, ok := preparingWorkerPool.TrySubmit(s.splittingTransactionElements, errChan); !ok { - Plugin.LogWarn("funding outputs replenishment task not submitted, queue is full") - } - } - - // wait for updateStateOnConfirmation to return - return <-s.splittingEnv.listeningFinished -} - -// updateStateOnConfirmation listens for the confirmation and updates the faucet internal state. -// Listening is finished when all issued transactions are confirmed or when the awaiting time is up. -func (s *StateManager) updateStateOnConfirmation(txNumToProcess uint64, preparationFailure <-chan error, listenerAttached chan<- types.Empty) { - Plugin.LogInfof("Start listening for confirmation") - // buffered channel will store all confirmed transactions - txConfirmed := make(chan utxo.TransactionID, txNumToProcess) // length is s.targetSupplyOutputsCount or 1 - - monitorTxAcceptance := event.NewClosure(func(event *ledger.TransactionAcceptedEvent) { - txID := event.TransactionID - if s.splittingEnv.WasIssuedInThisPreparation(txID) { - txConfirmed <- txID - } - }) - - // listen on confirmation - deps.Tangle.Ledger.Events.TransactionAccepted.Attach(monitorTxAcceptance) - defer deps.Tangle.Ledger.Events.TransactionAccepted.Detach(monitorTxAcceptance) - - ticker := time.NewTicker(WaitForAcceptance) - defer ticker.Stop() - - listenerAttached <- types.Empty{} - - // issuedCount indicates number of transactions issued without any errors, declared with max value, - // decremented whenever failure is signaled through the preparationFailure channel - issuedCount := txNumToProcess - - // 
waiting for transactions to be confirmed - for { - select { - case confirmedTx := <-txConfirmed: - finished := s.onConfirmation(confirmedTx, issuedCount) - if finished { - s.splittingEnv.listeningFinished <- nil - return - } - case <-ticker.C: - finished, err := s.onTickerCheckMaxAttempts(issuedCount) - if finished { - s.splittingEnv.listeningFinished <- err - return - } - case err := <-preparationFailure: - Plugin.LogErrorf("transaction preparation failed: %s", err) - issuedCount-- - case <-s.shutdownSignal: - s.splittingEnv.listeningFinished <- nil - } - } -} - -func (s *StateManager) onTickerCheckMaxAttempts(issuedCount uint64) (finished bool, err error) { - if s.splittingEnv.timeoutCount.Load() >= MaxWaitAttempts { - if s.splittingEnv.confirmedCount.Load() == 0 { - err = ErrSplittingFundsFailed - return true, err - } - return true, errors.Errorf("confirmed %d and saved %d out of %d issued transactions: %w", s.splittingEnv.confirmedCount.Load(), s.splittingEnv.updateStateCount.Load(), issuedCount, ErrConfirmationTimeoutExpired) - } - s.splittingEnv.timeoutCount.Add(1) - return false, err -} - -func (s *StateManager) onConfirmation(confirmedTx utxo.TransactionID, issuedCount uint64) (finished bool) { - s.splittingEnv.confirmedCount.Add(1) - err := s.updateState(confirmedTx) - if err == nil { - s.splittingEnv.updateStateCount.Add(1) - } - // all issued transactions have been confirmed - if s.splittingEnv.confirmedCount.Load() == issuedCount { - return true - } - return false -} - -// updateState takes a confirmed transaction (splitting or supply tx), and updates the faucet internal state based on its content. 
-func (s *StateManager) updateState(transactionID utxo.TransactionID) (err error) { - deps.Tangle.Ledger.Storage.CachedTransaction(transactionID).Consume(func(transaction utxo.Transaction) { - tx, ok := transaction.(*devnetvm.Transaction) - if !ok { - return - } - - newFaucetRemainderBalance := s.replenishmentState.RemainderOutputBalance() - s.tokensUsedOnSupplyReplenishment - - // derive information from outputs - for _, output := range tx.Essence().Outputs() { - iotaBalance, hasIota := output.Balances().Get(devnetvm.ColorIOTA) - if !hasIota { - err = errors.Errorf("tx outputs don't have IOTA balance ") - return - } - switch iotaBalance { - case s.tokensPerRequest: - s.fundingState.FundingOutputsAdd(&FaucetOutput{ - ID: output.ID(), - Balance: iotaBalance, - Address: output.Address(), - AddressIndex: s.replenishmentState.GetAddressToIndex(output.Address().Base58()), - }) - case newFaucetRemainderBalance: - s.replenishmentState.SetRemainderOutput(&FaucetOutput{ - ID: output.ID(), - Balance: iotaBalance, - Address: output.Address(), - AddressIndex: s.replenishmentState.GetAddressToIndex(output.Address().Base58()), - }) - case s.tokensPerSupplyOutput: - s.replenishmentState.AddSupplyOutput(&FaucetOutput{ - ID: output.ID(), - Balance: iotaBalance, - Address: output.Address(), - AddressIndex: s.replenishmentState.GetAddressToIndex(output.Address().Base58()), - }) - default: - err = errors.Errorf("tx %s should not have output with balance %d", transactionID.Base58(), iotaBalance) - return - } - } - }) - - return err -} - -// createSplittingTx creates splitting transaction based on provided callback function. 
-func (s *StateManager) createSplittingTx(transactionElementsCallback func() (devnetvm.Inputs, devnetvm.Outputs, wallet, error)) (*devnetvm.Transaction, error) { - inputs, outputs, w, err := transactionElementsCallback() - if err != nil { - return nil, err - } - essence := devnetvm.NewTransactionEssence( - 0, - clock.SyncedTime(), - deps.Local.ID(), - // consensus mana is pledged to EmptyNodeID - identity.ID{}, - devnetvm.NewInputs(inputs...), - devnetvm.NewOutputs(outputs...), - ) - - unlockBlock := devnetvm.NewSignatureUnlockBlock(w.sign(essence)) - - tx := devnetvm.NewTransaction( - essence, - devnetvm.UnlockBlocks{unlockBlock}, - ) - return tx, nil -} - -// supplyTransactionElements is a callback function used during supply transaction creation. -// It takes the current remainder output and creates a supply transaction into targetSupplyOutputsCount -// outputs and one remainder output. It uses address indices 1 to targetSupplyOutputsCount because each address in -// a transaction output has to be unique and can prepare at most MaxFaucetOutputsCount supply outputs at once. -func (s *StateManager) supplyTransactionElements() (inputs devnetvm.Inputs, outputs devnetvm.Outputs, w wallet, err error) { - inputs = devnetvm.NewInputs(devnetvm.NewUTXOInput(s.replenishmentState.RemainderOutputID())) - // prepare targetSupplyOutputsCount number of supply outputs for further splitting. 
- outputs = make(devnetvm.Outputs, 0, s.targetSupplyOutputsCount+1) - - // all funding outputs will land on supply addresses 1 to 126 - for index := uint64(1); index < s.targetSupplyOutputsCount+1; index++ { - outputs = append(outputs, s.createOutput(s.replenishmentState.seed.Address(index).Address(), s.tokensPerSupplyOutput)) - s.replenishmentState.AddAddressToIndex(s.replenishmentState.seed.Address(index).Address().Base58(), index) - } - - // add the remainder output - remainder := s.replenishmentState.RemainderOutputBalance() - s.tokensPerSupplyOutput*s.targetSupplyOutputsCount - outputs = append(outputs, s.createOutput(s.replenishmentState.seed.Address(RemainderAddressIndex).Address(), remainder)) - - w = wallet{keyPair: *s.replenishmentState.seed.KeyPair(RemainderAddressIndex)} - return -} - -// splittingTransactionElements is a callback function used during creation of splitting transactions. -// It splits a supply output into funding outputs and uses lastFundingOutputAddressIndex to derive their target address. -func (s *StateManager) splittingTransactionElements() (inputs devnetvm.Inputs, outputs devnetvm.Outputs, w wallet, err error) { - supplyOutput, err := s.replenishmentState.NextSupplyOutput() - if err != nil { - err = errors.Errorf("could not retrieve supply output: %w", err) - return - } - - inputs = devnetvm.NewInputs(devnetvm.NewUTXOInput(supplyOutput.ID)) - outputs = make(devnetvm.Outputs, 0, s.splittingMultiplier) - - for i := uint64(0); i < s.splittingMultiplier; i++ { - index := s.replenishmentState.IncrLastFundingOutputAddressIndex() - addr := s.replenishmentState.seed.Address(index).Address() - outputs = append(outputs, s.createOutput(addr, s.tokensPerRequest)) - s.replenishmentState.AddAddressToIndex(addr.Base58(), index) - } - w = wallet{keyPair: *s.replenishmentState.seed.KeyPair(supplyOutput.AddressIndex)} - - return -} - -// createOutput creates an output based on provided address and balance. 
-func (s *StateManager) createOutput(addr devnetvm.Address, balance uint64) devnetvm.Output { - return devnetvm.NewSigLockedColoredOutput( - devnetvm.NewColoredBalances( - map[devnetvm.Color]uint64{ - devnetvm.ColorIOTA: balance, - }), - addr, - ) -} - -// issueTx issues a transaction to the Tangle and waits for it to become booked. -func (s *StateManager) issueTx(tx *devnetvm.Transaction) (blk *tangleold.Block, err error) { - // attach to block layer - issueTransaction := func() (*tangleold.Block, error) { - block, e := deps.Tangle.IssuePayload(tx) - if e != nil { - return nil, e - } - return block, nil - } - - // block for a certain amount of time until we know that the transaction - // actually got booked by this node itself - // TODO: replace with an actual more reactive way - blk, err = blocklayer.AwaitBlockToBeBooked(issueTransaction, tx.ID(), s.maxTxBookedAwaitTime) - if err != nil { - return nil, errors.Errorf("%w: tx %s", err, tx.ID().String()) - } - return blk, nil -} - -// splittingEnv provides variables used for synchronization during splitting transactions. -type splittingEnv struct { - // preparedTxID is a map that stores prepared and issued transaction IDs - issuedTxIDs map[utxo.TransactionID]types.Empty - sync.RWMutex - - // channel to signal that listening has finished - listeningFinished chan error - - // counts confirmed transactions during listening - confirmedCount *atomic.Uint64 - - // counts successful splits - updateStateCount *atomic.Uint64 - - // counts max attempts while listening for confirmation - timeoutCount *atomic.Uint64 -} - -func newSplittingEnv() *splittingEnv { - return &splittingEnv{ - issuedTxIDs: make(map[utxo.TransactionID]types.Empty), - listeningFinished: make(chan error), - confirmedCount: atomic.NewUint64(0), - updateStateCount: atomic.NewUint64(0), - timeoutCount: atomic.NewUint64(0), - } -} - -// WasIssuedInThisPreparation indicates if given transaction was issued during this lifespan of splittingEnv. 
-func (s *splittingEnv) WasIssuedInThisPreparation(transactionID utxo.TransactionID) bool { - s.RLock() - defer s.RUnlock() - - _, ok := s.issuedTxIDs[transactionID] - return ok -} - -// IssuedTransactionsCount returns how many transactions was issued this far. -func (s *splittingEnv) IssuedTransactionsCount() uint64 { - s.RLock() - defer s.RUnlock() - - return uint64(len(s.issuedTxIDs)) -} - -// AddIssuedTxID adds transactionID to the issuedTxIDs map. -func (s *splittingEnv) AddIssuedTxID(txID utxo.TransactionID) { - s.Lock() - defer s.Unlock() - s.issuedTxIDs[txID] = types.Void -} - -// fundingState manages fundingOutputs and its mutex. -type fundingState struct { - // ordered list of available outputs to fund faucet requests - fundingOutputs *list.List - - sync.RWMutex -} - -func newFundingState() *fundingState { - state := &fundingState{ - fundingOutputs: list.New(), - } - - return state -} - -// FundingOutputsCount returns the number of available outputs that can be used to fund a request. -func (f *fundingState) FundingOutputsCount() int { - f.RLock() - defer f.RUnlock() - - return f.fundingOutputsCount() -} - -// FundingOutputsAdd adds FaucetOutput to the fundingOutputs list. -func (f *fundingState) FundingOutputsAdd(fundingOutput *FaucetOutput) { - f.Lock() - defer f.Unlock() - - f.fundingOutputs.PushBack(fundingOutput) -} - -// GetFundingOutput returns the first funding output in the list. -func (f *fundingState) GetFundingOutput() (fundingOutput *FaucetOutput, err error) { - f.Lock() - defer f.Unlock() - - if f.fundingOutputsCount() < 1 { - return nil, ErrNotEnoughFundingOutputs - } - fundingOutput = f.fundingOutputs.Remove(f.fundingOutputs.Front()).(*FaucetOutput) - return -} - -func (f *fundingState) fundingOutputsCount() int { - return f.fundingOutputs.Len() -} - -// replenishmentState keeps all variables and related methods used to track faucet state during replenishment. 
-type replenishmentState struct { - // output that holds the remainder funds to the faucet, should always be on address 0 - remainderOutput *FaucetOutput - // outputs that hold funds during the replenishment phase, filled in only with outputs needed for next split, should always be on address 1 - supplyOutputs *list.List - // the last funding output address index, should start from MaxFaucetOutputsCount + 1 - // when we prepare new funding outputs, we start from lastFundingOutputAddressIndex + 1 - lastFundingOutputAddressIndex uint64 - // mapping base58 encoded addresses to their indices - addressToIndex map[string]uint64 - // the seed instance of the faucet holding the tokens - seed *walletseed.Seed - // IsReplenishing indicates if faucet is currently replenishing the next batch of funding outputs - IsReplenishing typeutils.AtomicBool - - // is used when fulfilling request for waiting for more funds in case they were not prepared on time - sync.WaitGroup - // ensures that fields related to new funds creation can be accesses by only one goroutine at the same time - sync.RWMutex -} - -func newPreparingState(seed *walletseed.Seed) *replenishmentState { - state := &replenishmentState{ - seed: seed, - addressToIndex: map[string]uint64{ - seed.Address(RemainderAddressIndex).Address().Base58(): RemainderAddressIndex, - }, - lastFundingOutputAddressIndex: MaxFaucetOutputsCount, - supplyOutputs: list.New(), - remainderOutput: nil, - } - return state -} - -// RemainderOutputBalance returns the balance value of remainderOutput. -func (p *replenishmentState) RemainderOutputBalance() uint64 { - p.RLock() - defer p.RUnlock() - return p.remainderOutput.Balance -} - -// RemainderOutputID returns the OutputID of remainderOutput. -func (p *replenishmentState) RemainderOutputID() utxo.OutputID { - p.RLock() - defer p.RUnlock() - id := p.remainderOutput.ID - return id -} - -// SetRemainderOutput sets provided output as remainderOutput. 
-func (p *replenishmentState) SetRemainderOutput(output *FaucetOutput) { - p.Lock() - defer p.Unlock() - - p.remainderOutput = output -} - -// nextSupplyOutput returns the first supply address in the list. -func (p *replenishmentState) NextSupplyOutput() (supplyOutput *FaucetOutput, err error) { - p.Lock() - defer p.Unlock() - - if p.supplyOutputsCount() < 1 { - return nil, ErrNotEnoughSupplyOutputs - } - element := p.supplyOutputs.Front() - supplyOutput = p.supplyOutputs.Remove(element).(*FaucetOutput) - return -} - -// SupplyOutputsCount returns the number of available outputs that can be split to prepare more faucet funds. -func (p *replenishmentState) SupplyOutputsCount() int { - p.RLock() - defer p.RUnlock() - - return p.supplyOutputsCount() -} - -func (p *replenishmentState) supplyOutputsCount() int { - return p.supplyOutputs.Len() -} - -// AddSupplyOutput adds FaucetOutput to the supplyOutputs. -func (p *replenishmentState) AddSupplyOutput(output *FaucetOutput) { - p.Lock() - defer p.Unlock() - - p.supplyOutputs.PushBack(output) -} - -// IncrLastFundingOutputAddressIndex increments and returns the new lastFundingOutputAddressIndex value. -func (p *replenishmentState) IncrLastFundingOutputAddressIndex() uint64 { - p.Lock() - defer p.Unlock() - - p.lastFundingOutputAddressIndex++ - return p.lastFundingOutputAddressIndex -} - -// GetLastFundingOutputAddressIndex returns current lastFundingOutputAddressIndex value. -func (p *replenishmentState) GetLastFundingOutputAddressIndex() uint64 { - p.RLock() - defer p.RUnlock() - - return p.lastFundingOutputAddressIndex -} - -// GetLastFundingOutputAddressIndex sets new lastFundingOutputAddressIndex. -func (p *replenishmentState) SetLastFundingOutputAddressIndex(index uint64) { - p.Lock() - defer p.Unlock() - - p.lastFundingOutputAddressIndex = index -} - -// GetAddressToIndex returns index for provided address based on addressToIndex map. 
-func (p *replenishmentState) GetAddressToIndex(addr string) uint64 { - p.RLock() - defer p.RUnlock() - - return p.addressToIndex[addr] -} - -// AddAddressToIndex adds address and corresponding index to the addressToIndex map. -func (p *replenishmentState) AddAddressToIndex(addr string, index uint64) { - p.Lock() - defer p.Unlock() - - p.addressToIndex[addr] = index -} - -type wallet struct { - keyPair ed25519.KeyPair -} - -func (w wallet) privateKey() ed25519.PrivateKey { - return w.keyPair.PrivateKey -} - -func (w wallet) publicKey() ed25519.PublicKey { - return w.keyPair.PublicKey -} - -func (w wallet) sign(txEssence *devnetvm.TransactionEssence) *devnetvm.ED25519Signature { - return devnetvm.NewED25519Signature(w.publicKey(), w.privateKey().Sign(lo.PanicOnErr(txEssence.Bytes()))) -} diff --git a/plugins/warpsync/parameters.go b/plugins/warpsync/parameters.go new file mode 100644 index 0000000000..bf7e933280 --- /dev/null +++ b/plugins/warpsync/parameters.go @@ -0,0 +1,24 @@ +package warpsync + +import ( + "time" + + "github.com/iotaledger/goshimmer/plugins/config" +) + +// ParametersDefinition contains the definition of configuration parameters used by the gossip plugin. +type ParametersDefinition struct { + // Concurrency defines the amount of epochs to attempt to sync at the same time. + Concurrency int `default:"10" usage:"the amount of epochs to attempt to sync at the same time"` + // BlockBatchSize defines the amount of blocks to send in a single epoch blocks response"` + BlockBatchSize int `default:"100" usage:"the amount of blocks to send in a single epoch blocks response"` + // SyncRangeTimeOut defines the time after which a sync range is considered as failed. + SyncRangeTimeOut time.Duration `default:"5m" usage:"the time after which a sync range is considered as failed"` +} + +// Parameters contains the configuration parameters of the gossip plugin. 
+var Parameters = &ParametersDefinition{} + +func init() { + config.BindParameters(Parameters, "warpsync") +} diff --git a/plugins/warpsync/plugin.go b/plugins/warpsync/plugin.go new file mode 100644 index 0000000000..0558eec631 --- /dev/null +++ b/plugins/warpsync/plugin.go @@ -0,0 +1,89 @@ +package warpsync + +import ( + "context" + + "go.uber.org/dig" + + "github.com/iotaledger/hive.go/core/autopeering/peer" + "github.com/iotaledger/hive.go/core/daemon" + "github.com/iotaledger/hive.go/core/generics/event" + "github.com/iotaledger/hive.go/core/node" + "github.com/pkg/errors" + + "github.com/iotaledger/goshimmer/packages/core/tangleold" + "github.com/iotaledger/goshimmer/packages/node/p2p" + "github.com/iotaledger/goshimmer/packages/node/shutdown" + "github.com/iotaledger/goshimmer/packages/node/warpsync" + + "github.com/iotaledger/goshimmer/packages/core/notarization" +) + +// PluginName is the name of the warpsync plugin. +const PluginName = "Warpsync" + +var ( + // Plugin is the plugin instance of the warpsync plugin. 
+ Plugin *node.Plugin + + deps = new(dependencies) +) + +type dependencies struct { + dig.In + + Tangle *tangleold.Tangle + WarpsyncMgr *warpsync.Manager + NotarizationMgr *notarization.Manager + P2PMgr *p2p.Manager +} + +func init() { + Plugin = node.NewPlugin(PluginName, deps, node.Enabled, configure, run) + + Plugin.Events.Init.Hook(event.NewClosure(func(event *node.InitEvent) { + if err := event.Container.Provide(func(t *tangleold.Tangle, p2pManager *p2p.Manager) *warpsync.Manager { + // TODO: use a different block loader function + loadBlockFunc := func(blockID tangleold.BlockID) (*tangleold.Block, error) { + cachedBlock := t.Storage.Block(blockID) + defer cachedBlock.Release() + block, exists := cachedBlock.Unwrap() + if !exists { + return nil, errors.Errorf("block %s not found", blockID) + } + return block, nil + } + processBlockFunc := func(blk *tangleold.Block, peer *peer.Peer) { + t.Parser.Events.BlockParsed.Trigger(&tangleold.BlockParsedEvent{ + Block: blk, + Peer: peer, + }) + } + return warpsync.NewManager(p2pManager, loadBlockFunc, processBlockFunc, Plugin.Logger(), warpsync.WithConcurrency(Parameters.Concurrency), warpsync.WithBlockBatchSize(Parameters.BlockBatchSize)) + }); err != nil { + Plugin.Panic(err) + } + })) +} + +func configure(_ *node.Plugin) { + deps.NotarizationMgr.Events.SyncRange.Attach(event.NewClosure(func(event *notarization.SyncRangeEvent) { + ctx, cancel := context.WithTimeout(context.Background(), Parameters.SyncRangeTimeOut) + defer cancel() + if err := deps.WarpsyncMgr.WarpRange(ctx, event.StartEI, event.EndEI, event.StartEC, event.EndPrevEC); err != nil { + Plugin.LogWarn("failed to warpsync:", err) + } + })) +} + +func start(ctx context.Context) { + defer Plugin.LogInfo("Stopping " + PluginName + " ... 
done") + <-ctx.Done() + Plugin.LogInfo("Stopping " + PluginName + " ...") +} + +func run(plugin *node.Plugin) { + if err := daemon.BackgroundWorker(PluginName, start, shutdown.PriorityWarpsync); err != nil { + plugin.Logger().Panicf("Failed to start as daemon: %s", err) + } +} diff --git a/plugins/webapi/block/plugin.go b/plugins/webapi/block/plugin.go index c861c1f2cd..9a062d2d1d 100644 --- a/plugins/webapi/block/plugin.go +++ b/plugins/webapi/block/plugin.go @@ -18,7 +18,6 @@ import ( "github.com/iotaledger/goshimmer/packages/core/ledger/utxo" "github.com/iotaledger/goshimmer/packages/core/ledger/vm/devnetvm" "github.com/iotaledger/goshimmer/packages/core/markers" - "github.com/iotaledger/goshimmer/packages/core/notarization" "github.com/iotaledger/goshimmer/packages/core/tangleold" ) @@ -65,10 +64,10 @@ func GetSequence(c echo.Context) (err error) { if deps.Tangle.Booker.MarkersManager.Sequence(sequenceID).Consume(func(sequence *markers.Sequence) { blockWithLastMarker := deps.Tangle.Booker.MarkersManager.BlockID(markers.NewMarker(sequenceID, sequence.HighestIndex())) err = c.String(http.StatusOK, stringify.Struct("Sequence", - stringify.StructField("ID", sequence.ID()), - stringify.StructField("LowestIndex", sequence.LowestIndex()), - stringify.StructField("HighestIndex", sequence.HighestIndex()), - stringify.StructField("BlockWithLastMarker", blockWithLastMarker), + stringify.NewStructField("ID", sequence.ID()), + stringify.NewStructField("LowestIndex", sequence.LowestIndex()), + stringify.NewStructField("HighestIndex", sequence.HighestIndex()), + stringify.NewStructField("BlockWithLastMarker", blockWithLastMarker), )) }) { return @@ -112,7 +111,7 @@ func GetBlock(c echo.Context) (err error) { var payloadBytes []byte payloadBytes, err = block.Payload().Bytes() - ecRecord := epoch.NewECRecord(block.EI()) + ecRecord := epoch.NewECRecord(block.ECRecordEI()) ecRecord.SetECR(block.ECR()) ecRecord.SetPrevEC(block.PrevEC()) @@ -135,8 +134,8 @@ func GetBlock(c 
echo.Context) (err error) { return "" }(), - EC: notarization.EC(ecRecord).Base58(), - EI: uint64(block.EI()), + EC: ecRecord.ComputeEC().Base58(), + EI: uint64(block.ECRecordEI()), ECR: block.ECR().Base58(), PrevEC: block.PrevEC().Base58(), Payload: payloadBytes, diff --git a/plugins/webapi/epoch/plugin.go b/plugins/webapi/epoch/plugin.go index e5bbe4691a..e9c3bd783b 100644 --- a/plugins/webapi/epoch/plugin.go +++ b/plugins/webapi/epoch/plugin.go @@ -51,8 +51,8 @@ func configure(_ *node.Plugin) { func getAllCommittedEpochs(c echo.Context) error { allEpochs := epochstorage.GetCommittableEpochs() allEpochsInfos := make([]*jsonmodels.EpochInfo, 0, len(allEpochs)) - for _, ecr := range allEpochs { - allEpochsInfos = append(allEpochsInfos, jsonmodels.EpochInfoFromRecord(ecr)) + for _, ecRecord := range allEpochs { + allEpochsInfos = append(allEpochsInfos, jsonmodels.EpochInfoFromRecord(ecRecord)) } sort.Slice(allEpochsInfos, func(i, j int) bool { return allEpochsInfos[i].EI < allEpochsInfos[j].EI @@ -65,7 +65,7 @@ func getCurrentEC(c echo.Context) error { if err != nil { return c.JSON(http.StatusInternalServerError, jsonmodels.NewErrorResponse(err)) } - ec := notarization.EC(ecRecord) + ec := ecRecord.ComputeEC() return c.JSON(http.StatusOK, ec.Base58()) } @@ -107,7 +107,7 @@ func getBlocks(c echo.Context) error { if err != nil { return c.JSON(http.StatusBadRequest, jsonmodels.NewErrorResponse(err)) } - blockIDs := epochstorage.GetEpochblocks(ei) + blockIDs := epochstorage.GetEpochBlockIDs(ei) blocks := make([]string, len(blockIDs)) for i, m := range blockIDs { diff --git a/plugins/webapi/snapshot/plugin.go b/plugins/webapi/snapshot/plugin.go index 6808d95db9..8631ccec42 100644 --- a/plugins/webapi/snapshot/plugin.go +++ b/plugins/webapi/snapshot/plugin.go @@ -9,26 +9,20 @@ import ( "github.com/labstack/echo" "github.com/iotaledger/goshimmer/packages/app/jsonmodels" - "github.com/iotaledger/goshimmer/packages/core/epoch" - 
"github.com/iotaledger/goshimmer/packages/core/ledger" - "github.com/iotaledger/goshimmer/packages/core/notarization" - "github.com/iotaledger/goshimmer/packages/core/tangleold" - "github.com/iotaledger/goshimmer/packages/core/snapshot" ) // region Plugin /////////////////////////////////////////////////////////////////////////////////////////////////////// const ( - snapshotFileName = "snapshot.bin" + snapshotFileName = "/tmp/snapshot.bin" ) type dependencies struct { dig.In - Server *echo.Echo - Tangle *tangleold.Tangle - NotarizationMgr *notarization.Manager + Server *echo.Echo + SnapshotMgr *snapshot.Manager } var ( @@ -39,7 +33,7 @@ var ( ) func init() { - Plugin = node.NewPlugin("Snapshot", deps, node.Disabled, configure) + Plugin = node.NewPlugin("WebAPISnapshot", deps, node.Disabled, configure) } func configure(_ *node.Plugin) { @@ -52,20 +46,7 @@ func configure(_ *node.Plugin) { // DumpCurrentLedger dumps a snapshot (all unspent UTXO and all of the access mana) from now. func DumpCurrentLedger(c echo.Context) (err error) { - ecRecord, lastConfirmedEpoch, err := deps.Tangle.Options.CommitmentFunc() - if err != nil { - return c.JSON(http.StatusInternalServerError, jsonmodels.NewErrorResponse(err)) - } - - // lock the entire ledger in notarization manager until the snapshot is created. 
- deps.NotarizationMgr.WriteLockLedger() - defer deps.NotarizationMgr.WriteUnlockLedger() - - headerPord := headerProducer(ecRecord, lastConfirmedEpoch) - outputWithMetadataProd := snapshot.NewLedgerUTXOStatesProducer(lastConfirmedEpoch, deps.NotarizationMgr) - epochDiffsProd := snapshot.NewEpochDiffsProducer(lastConfirmedEpoch, ecRecord.EI(), deps.NotarizationMgr) - - header, err := snapshot.CreateSnapshot(snapshotFileName, headerPord, outputWithMetadataProd, epochDiffsProd) + header, err := deps.SnapshotMgr.CreateSnapshot(snapshotFileName) if err != nil { Plugin.LogErrorf("unable to get snapshot bytes %s", err) return c.JSON(http.StatusInternalServerError, jsonmodels.NewErrorResponse(err)) @@ -79,15 +60,4 @@ func DumpCurrentLedger(c echo.Context) (err error) { return c.Attachment(snapshotFileName, snapshotFileName) } -func headerProducer(ecRecord *epoch.ECRecord, lastConfirmedEpoch epoch.Index) snapshot.HeaderProducerFunc { - return func() (header *ledger.SnapshotHeader, err error) { - header = &ledger.SnapshotHeader{ - FullEpochIndex: lastConfirmedEpoch, - DiffEpochIndex: ecRecord.EI(), - LatestECRecord: ecRecord, - } - return header, nil - } -} - // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/plugins/webapi/weightprovider/plugin.go b/plugins/webapi/weightprovider/plugin.go index 17e6f196a1..369bf8f914 100644 --- a/plugins/webapi/weightprovider/plugin.go +++ b/plugins/webapi/weightprovider/plugin.go @@ -33,11 +33,12 @@ func configure(_ *node.Plugin) { } func getNodesHandler(c echo.Context) (err error) { - activeNodes := deps.Tangle.WeightProvider.(*tangleold.CManaWeightProvider).ActiveNodes() + activeNodes, _ := deps.Tangle.WeightProvider.(*tangleold.CManaWeightProvider).WeightsOfRelevantVoters() - activeNodesString := make(map[string][]int64) - for nodeID, al := range activeNodes { - activeNodesString[nodeID.String()] = al.Times() + activeNodesString := make([]string, 0) + + 
for id := range activeNodes { + activeNodesString = append(activeNodesString, id.String()) } return c.JSON(http.StatusOK, activeNodesString) diff --git a/tools/docker-network/docker-network.snapshot b/tools/docker-network/docker-network.snapshot index 0ecab7e575..9cd139ccb6 100644 Binary files a/tools/docker-network/docker-network.snapshot and b/tools/docker-network/docker-network.snapshot differ diff --git a/tools/genesis-snapshot/main.go b/tools/genesis-snapshot/main.go index 0b5d968572..a26a9040a3 100644 --- a/tools/genesis-snapshot/main.go +++ b/tools/genesis-snapshot/main.go @@ -2,16 +2,16 @@ package main import ( "fmt" + "github.com/iotaledger/goshimmer/packages/core/epoch" "log" "github.com/iotaledger/hive.go/core/identity" + "github.com/mr-tron/base58" - "github.com/iotaledger/goshimmer/packages/core/epoch" "github.com/iotaledger/goshimmer/packages/core/ledger" "github.com/iotaledger/goshimmer/packages/core/snapshot" "github.com/iotaledger/goshimmer/tools/genesis-snapshot/snapshotcreator" - "github.com/mr-tron/base58" flag "github.com/spf13/pflag" "github.com/spf13/viper" ) @@ -139,14 +139,20 @@ func readSnapshotFromFile(filePath string) (err error) { outputWithMetadataConsumer := func(outputWithMetadatas []*ledger.OutputWithMetadata) { fmt.Println(outputWithMetadatas) } - epochDiffsConsumer := func(_ *ledger.SnapshotHeader, epochDiffs map[epoch.Index]*ledger.EpochDiff) { + epochDiffConsumer := func(epochDiffs *ledger.EpochDiff) { fmt.Println(epochDiffs) } headerConsumer := func(h *ledger.SnapshotHeader) { fmt.Println(h) } + activityLogConsumer := func(activity epoch.SnapshotEpochActivity) { + fmt.Println(activity) + } + sepsConsumer := func(s *snapshot.SolidEntryPoints) { + fmt.Println(s) + } - err = snapshot.LoadSnapshot(filePath, headerConsumer, outputWithMetadataConsumer, epochDiffsConsumer) + err = snapshot.LoadSnapshot(filePath, headerConsumer, sepsConsumer, outputWithMetadataConsumer, epochDiffConsumer, activityLogConsumer) return } diff --git 
a/tools/genesis-snapshot/snapshotcreator/snapshotcreator.go b/tools/genesis-snapshot/snapshotcreator/snapshotcreator.go index 5b66a12538..b160fb7e0c 100644 --- a/tools/genesis-snapshot/snapshotcreator/snapshotcreator.go +++ b/tools/genesis-snapshot/snapshotcreator/snapshotcreator.go @@ -12,6 +12,7 @@ import ( "github.com/iotaledger/goshimmer/packages/core/ledger" "github.com/iotaledger/goshimmer/packages/core/ledger/utxo" "github.com/iotaledger/goshimmer/packages/core/ledger/vm/devnetvm" + "github.com/iotaledger/goshimmer/packages/core/tangleold" "github.com/iotaledger/goshimmer/client/wallet/packages/seed" "github.com/iotaledger/goshimmer/packages/core/snapshot" @@ -44,9 +45,13 @@ func CreateSnapshot(snapshotFileName string, genesisTokenAmount uint64, genesisS return } - epochDiffsProd := func() (epochDiffs map[epoch.Index]*ledger.EpochDiff, err error) { - epochDiffs = make(map[epoch.Index]*ledger.EpochDiff) - fmt.Println(epochDiffs) + sepsProd := func() (sep *snapshot.SolidEntryPoints) { + return &snapshot.SolidEntryPoints{EI: epoch.Index(0), Seps: make([]tangleold.BlockID, 0)} + } + + epochDiffsProd := func() (diffs *ledger.EpochDiff) { + outputs := make([]*ledger.OutputWithMetadata, 0) + diffs = ledger.NewEpochDiff(outputs, outputs) return } @@ -55,11 +60,17 @@ func CreateSnapshot(snapshotFileName string, genesisTokenAmount uint64, genesisS output, outputMetadata := createOutput(seed.NewSeed(genesisSeedBytes).Address(0).Address(), genesisTokenAmount, identity.ID{}, now) outputsWithMetadata = append(outputsWithMetadata, ledger.NewOutputWithMetadata(output.ID(), output, outputMetadata.CreationTime(), outputMetadata.ConsensusManaPledgeID(), outputMetadata.AccessManaPledgeID())) + // prepare activity log + epochActivity := epoch.NewSnapshotEpochActivity() + epochActivity[epoch.Index(0)] = epoch.NewSnapshotNodeActivity() + for nodeID, value := range nodesToPledge { // pledge to ID but send funds to random address output, outputMetadata = 
createOutput(devnetvm.NewED25519Address(ed25519.GenerateKeyPair().PublicKey), value, nodeID, now) outputsWithMetadata = append(outputsWithMetadata, ledger.NewOutputWithMetadata(output.ID(), output, outputMetadata.CreationTime(), outputMetadata.ConsensusManaPledgeID(), outputMetadata.AccessManaPledgeID())) + epochActivity[epoch.Index(0)].SetNodeActivity(nodeID, 1) } + i := 0 utxoStatesProd := func() *ledger.OutputWithMetadata { @@ -73,7 +84,11 @@ func CreateSnapshot(snapshotFileName string, genesisTokenAmount uint64, genesisS return o } - _, err = snapshot.CreateSnapshot(snapshotFileName, headerProd, utxoStatesProd, epochDiffsProd) + activityLogProd := func() epoch.SnapshotEpochActivity { + return epochActivity + } + + _, err = snapshot.CreateSnapshot(snapshotFileName, headerProd, sepsProd, utxoStatesProd, epochDiffsProd, activityLogProd) return } @@ -90,15 +105,21 @@ func CreateSnapshotForIntegrationTest(snapshotFileName string, genesisTokenAmoun now := time.Now() outputsWithMetadata := make([]*ledger.OutputWithMetadata, 0) + // prepare activity log + epochActivity := epoch.NewSnapshotEpochActivity() + epochActivity[epoch.Index(0)] = epoch.NewSnapshotNodeActivity() + // This is the same seed used to derive the faucet ID. 
genesisPledgeID := identity.New(ed25519.PrivateKeyFromSeed(genesisNodePledge).Public()).ID() output, outputMetadata := createOutput(seed.NewSeed(genesisSeedBytes).Address(0).Address(), genesisTokenAmount, genesisPledgeID, now) outputsWithMetadata = append(outputsWithMetadata, ledger.NewOutputWithMetadata(output.ID(), output, outputMetadata.CreationTime(), outputMetadata.ConsensusManaPledgeID(), outputMetadata.AccessManaPledgeID())) + epochActivity[epoch.Index(0)].SetNodeActivity(genesisPledgeID, 1) for nodeSeedBytes, value := range nodesToPledge { nodeID := identity.New(ed25519.PrivateKeyFromSeed(nodeSeedBytes[:]).Public()).ID() output, outputMetadata = createOutput(seed.NewSeed(nodeSeedBytes[:]).Address(0).Address(), value, nodeID, now) outputsWithMetadata = append(outputsWithMetadata, ledger.NewOutputWithMetadata(output.ID(), output, outputMetadata.CreationTime(), outputMetadata.ConsensusManaPledgeID(), outputMetadata.AccessManaPledgeID())) + epochActivity[epoch.Index(0)].SetNodeActivity(nodeID, 1) } i := 0 utxoStatesProd := func() *ledger.OutputWithMetadata { @@ -128,13 +149,21 @@ func CreateSnapshotForIntegrationTest(snapshotFileName string, genesisTokenAmoun return } - epochDiffsProd := func() (epochDiffs map[epoch.Index]*ledger.EpochDiff, err error) { - epochDiffs = make(map[epoch.Index]*ledger.EpochDiff) - fmt.Println(epochDiffs) + sepsProd := func() (sep *snapshot.SolidEntryPoints) { + return &snapshot.SolidEntryPoints{EI: epoch.Index(0), Seps: make([]tangleold.BlockID, 0)} + } + + epochDiffsProd := func() (diffs *ledger.EpochDiff) { + outputs := make([]*ledger.OutputWithMetadata, 0) + diffs = ledger.NewEpochDiff(outputs, outputs) return } - _, err = snapshot.CreateSnapshot(snapshotFileName, headerProd, utxoStatesProd, epochDiffsProd) + activityLogProd := func() epoch.SnapshotEpochActivity { + return epochActivity + } + + _, err = snapshot.CreateSnapshot(snapshotFileName, headerProd, sepsProd, utxoStatesProd, epochDiffsProd, activityLogProd) return } diff 
--git a/tools/integration-tests/tester/framework/config/config.go b/tools/integration-tests/tester/framework/config/config.go index 0abd8625ac..8d79470267 100644 --- a/tools/integration-tests/tester/framework/config/config.go +++ b/tools/integration-tests/tester/framework/config/config.go @@ -76,7 +76,7 @@ type Database struct { database.ParametersDefinition } -// Gossip defines the parameters of the gossip plugin. +// P2P defines the parameters of the gossip plugin. type P2P struct { Enabled bool @@ -97,14 +97,14 @@ type POW struct { pow.ParametersDefinition } -// Webapi defines the parameters of the Web API plugin. +// WebAPI defines the parameters of the Web API plugin. type WebAPI struct { Enabled bool webapi.ParametersDefinition } -// Autopeering defines the parameters of the autopeering plugin. +// AutoPeering defines the parameters of the autopeering plugin. type AutoPeering struct { Enabled bool diff --git a/tools/integration-tests/tester/framework/parameters.go b/tools/integration-tests/tester/framework/parameters.go index dc2527de3a..cbe218b3c2 100644 --- a/tools/integration-tests/tester/framework/parameters.go +++ b/tools/integration-tests/tester/framework/parameters.go @@ -60,7 +60,7 @@ func PeerConfig() config.GoShimmer { c.Image = "iotaledger/goshimmer" - c.DisabledPlugins = []string{"portcheck", "analysisClient", "profiling", "clock", "remotelogmetrics", "remotemetrics", "epochStorage", "WebAPIEpochEndpoint", "ManaInitializer"} + c.DisabledPlugins = []string{"portcheck", "analysisClient", "profiling", "clock", "remotelogmetrics", "remotemetrics", "epochStorage", "WebAPIEpochEndpoint", "ManaInitializer", "Warpsync"} c.GenesisTime = GenesisTime @@ -93,7 +93,7 @@ func PeerConfig() config.GoShimmer { c.Notarization.Enabled = true c.Notarization.BootstrapWindow = 0 // disable bootstrap window for tests - c.Notarization.MinEpochCommitableAge = 10 * time.Second + c.Notarization.MinEpochCommittableAge = 10 * time.Second c.RateSetter.Enabled = true 
c.RateSetter.RateSetterParametersDefinition.Enable = false @@ -101,9 +101,6 @@ func PeerConfig() config.GoShimmer { c.Faucet.Enabled = false c.Faucet.Seed = base58.Encode(GenesisSeedBytes) c.Faucet.PowDifficulty = 1 - c.Faucet.SupplyOutputsCount = 4 - c.Faucet.SplittingMultiplier = 4 - c.Faucet.GenesisTokenAmount = 2500000000000000 c.Mana.Enabled = true @@ -123,7 +120,7 @@ func EntryNodeConfig() config.GoShimmer { "manualpeering", "chat", "WebAPIDataEndpoint", "WebAPIFaucetRequestEndpoint", "WebAPIBlockEndpoint", "Snapshot", "WebAPIWeightProviderEndpoint", "WebAPIInfoEndpoint", "WebAPIRateSetterEndpoint", "WebAPISchedulerEndpoint", "WebAPIEpochEndpoint", "EpochStorage", "remotelog", "remotelogmetrics", "DAGsVisualizer", "Notarization", - "Firewall", "WebAPILedgerstateEndpoint", "BootstrapManager") + "Firewall", "WebAPILedgerstateEndpoint", "BootstrapManager", "Warpsync") c.P2P.Enabled = false c.Gossip.Enabled = false c.POW.Enabled = false diff --git a/tools/integration-tests/tester/go.mod b/tools/integration-tests/tester/go.mod index 8aae683e7f..ca5f6f014f 100644 --- a/tools/integration-tests/tester/go.mod +++ b/tools/integration-tests/tester/go.mod @@ -7,7 +7,7 @@ require ( github.com/docker/docker v1.13.1 github.com/docker/go-connections v0.4.0 github.com/iotaledger/goshimmer v0.1.3 - github.com/iotaledger/hive.go/core v0.0.0-20220804174551-efbca20a83e4 + github.com/iotaledger/hive.go/core v1.0.0-beta.3.0.20220825155653-0a69188181ca github.com/mr-tron/base58 v1.2.0 github.com/stretchr/testify v1.8.0 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa @@ -33,6 +33,7 @@ require ( github.com/docker/distribution v2.7.1+incompatible // indirect github.com/docker/go-units v0.4.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect + github.com/ethereum/go-ethereum v1.10.21 // indirect github.com/fatih/structs v1.1.0 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect @@ -47,14 +48,15 @@ require ( 
github.com/go-stack/stack v1.8.0 // indirect github.com/gobuffalo/here v0.6.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/gohornet/grocksdb v1.7.1-0.20220426081058-60f50d7c59e8 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect - github.com/huin/goupnp v1.0.2 // indirect - github.com/iotaledger/hive.go/serializer/v2 v2.0.0-20220804174551-efbca20a83e4 // indirect + github.com/huin/goupnp v1.0.3 // indirect + github.com/iancoleman/orderedmap v0.2.0 // indirect + github.com/iotaledger/grocksdb v1.7.5-0.20220808142449-1dc0b8ac4d7d // indirect + github.com/iotaledger/hive.go/serializer/v2 v2.0.0-beta.2.0.20220825155653-0a69188181ca // indirect github.com/ipfs/go-cid v0.0.7 // indirect github.com/ipfs/go-ipfs-util v0.0.2 // indirect github.com/ipfs/go-log v1.0.5 // indirect @@ -174,9 +176,9 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/dig v1.15.0 // indirect go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b // indirect - golang.org/x/sys v0.0.0-20220803195053-6e608f9ce704 // indirect + go.uber.org/zap v1.22.0 // indirect + golang.org/x/net v0.0.0-20220809012201-f428fae20770 // indirect + golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect google.golang.org/protobuf v1.28.1 // indirect diff --git a/tools/integration-tests/tester/go.sum b/tools/integration-tests/tester/go.sum index 0737d7dede..5c957e59b8 100644 --- a/tools/integration-tests/tester/go.sum +++ b/tools/integration-tests/tester/go.sum @@ -228,6 +228,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/ethereum/go-ethereum v1.10.21 h1:5lqsEx92ZaZzRyOqBEXux4/UR06m296RGzN3ol3teJY= +github.com/ethereum/go-ethereum v1.10.21/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= @@ -334,8 +335,6 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= -github.com/gohornet/grocksdb v1.7.1-0.20220426081058-60f50d7c59e8 h1:JBcaA1xdFtalpZsMTYZuwUSOZxPAqqYAZa1gKYpK9nw= -github.com/gohornet/grocksdb v1.7.1-0.20220426081058-60f50d7c59e8/go.mod h1:RlgTltBHJ3ha/p0pWAd1g2zjw/524L1Vw6pjBTYLdIA= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -471,19 +470,24 @@ github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKe github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.0.2 
h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI= github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= +github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= +github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= +github.com/iancoleman/orderedmap v0.2.0 h1:sq1N/TFpYH++aViPcaKjys3bDClUEU7s5B+z6jq8pNA= +github.com/iancoleman/orderedmap v0.2.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/iotaledger/hive.go/core v0.0.0-20220804174551-efbca20a83e4 h1:LyZZsG5V5esS3iN6f42AVyUTxodo8s8KkkHYQi+CC8o= -github.com/iotaledger/hive.go/core v0.0.0-20220804174551-efbca20a83e4/go.mod h1:iiNz8/E6xPs6TWPiBKLB7QZEbaaSSNylnfWKThZNCEc= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-20220804174551-efbca20a83e4 h1:B2N7jbiIKEkLPPA5/kyDXqO6T+cPr5xhhDKS3Tjph9I= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-20220804174551-efbca20a83e4/go.mod h1:R6Q0aeFvUzt7+Mjd7fEXeCHOHasMLmXPxUGhCybTSw4= +github.com/iotaledger/grocksdb v1.7.5-0.20220808142449-1dc0b8ac4d7d h1:KYc/EkMX3CXvsYyUC9EvToUeYc0c74ZwjRg/0Wd27LU= +github.com/iotaledger/grocksdb v1.7.5-0.20220808142449-1dc0b8ac4d7d/go.mod 
h1:DuNKJ1G/vKugT7WGAoftMTu2aApNNxF4ADFMxLmKS2Y= +github.com/iotaledger/hive.go/core v1.0.0-beta.3.0.20220825155653-0a69188181ca h1:IyCodMz8Hz51t9Hh6/dQIEoJH8Un5neyeTaNDmOmwpE= +github.com/iotaledger/hive.go/core v1.0.0-beta.3.0.20220825155653-0a69188181ca/go.mod h1:aBKzVl6kjSz2bHsNoxAmTJUpHf/sr1x0PdOJbteEcsQ= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-beta.2.0.20220825155653-0a69188181ca h1:ZCJLYXxqi9hUo89BnJ7UVXLqruLZjdXX9tEY0J0aXYE= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-beta.2.0.20220825155653-0a69188181ca/go.mod h1:beZKjVT4HPayWfwsmItNNI5E81rS783vGx5ZwRbZQgY= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -1316,7 +1320,6 @@ go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -1331,8 +1334,8 @@ go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= -go.uber.org/zap v1.21.0/go.mod 
h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= +go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1461,8 +1464,8 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b h1:3ogNYyK4oIQdIKzTu68hQrr4iuVxF3AxKl9Aj/eDrw0= -golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220809012201-f428fae20770 h1:dIi4qVdvjZEjiMDv7vhokAZNGnz3kepwuXqFKYDdDMs= +golang.org/x/net v0.0.0-20220809012201-f428fae20770/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1573,8 +1576,8 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220803195053-6e608f9ce704 h1:Y7NOhdqIOU8kYI7BxsgL38d0ot0raxvcW+EMQU2QrT4= -golang.org/x/sys v0.0.0-20220803195053-6e608f9ce704/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -1657,8 +1660,7 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 h1:0c3L82FDQ5rt1bjTBlchS8t6RQ6299/+5bWMnRLh+uI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/tools/integration-tests/tester/tests/common/common_synchronization_test.go 
b/tools/integration-tests/tester/tests/common/common_synchronization_test.go index 535480d401..76a6faf245 100644 --- a/tools/integration-tests/tester/tests/common/common_synchronization_test.go +++ b/tools/integration-tests/tester/tests/common/common_synchronization_test.go @@ -60,6 +60,7 @@ func TestCommonSynchronization(t *testing.T) { // 4. check whether all issued blocks are available on to the new peer tests.RequireBlocksAvailable(t, []*framework.Node{newPeer}, ids, time.Minute, tests.Tick) tests.RequireBlocksEqual(t, []*framework.Node{newPeer}, ids, time.Minute, tests.Tick) + require.True(t, tests.Synced(t, newPeer)) // 5. shut down newly added peer diff --git a/tools/integration-tests/tester/tests/consensus/consensus_conflict_spam_test.go b/tools/integration-tests/tester/tests/consensus/consensus_conflict_spam_test.go index 3636a3b325..7d466af9e3 100644 --- a/tools/integration-tests/tester/tests/consensus/consensus_conflict_spam_test.go +++ b/tools/integration-tests/tester/tests/consensus/consensus_conflict_spam_test.go @@ -56,8 +56,6 @@ func TestConflictSpamAndMergeToMaster(t *testing.T) { t.Logf("Sending %d data blocks to confirm Faucet Outputs", dataBlocksAmount) tests.SendDataBlocksWithDelay(t, n.Peers(), dataBlocksAmount, delayBetweenDataBlocks) - tests.AwaitInitialFaucetOutputsPrepared(t, faucet, n.Peers()) - fundingAddress := peer1.Address(0) tests.SendFaucetRequest(t, peer1, fundingAddress) diff --git a/tools/integration-tests/tester/tests/consensus/consensus_test.go b/tools/integration-tests/tester/tests/consensus/consensus_test.go index 127d8c05c4..ec735bf743 100644 --- a/tools/integration-tests/tester/tests/consensus/consensus_test.go +++ b/tools/integration-tests/tester/tests/consensus/consensus_test.go @@ -78,7 +78,7 @@ func TestSimpleDoubleSpend(t *testing.T) { var txs1 []*devnetvm.Transaction var txs2 []*devnetvm.Transaction - // send transactions on the seperate partitions + // send transactions on the separate partitions for i := 0; i < 
numberOfConflictingTxs; i++ { t.Logf("issuing conflict %d", i+1) // This builds transactions that move the genesis funds on the first partition. @@ -103,7 +103,7 @@ func TestSimpleDoubleSpend(t *testing.T) { res2, err := node2.GetTransactionMetadata(txs2[0].ID().Base58()) require.NoError(t, err) return len(res1.ConflictIDs) > 0 && len(res2.ConflictIDs) > 0 - }, tests.Timeout, tests.Tick) + }, tests.Timeout*2, tests.Tick) // we issue blks on both nodes so the txs' ConfirmationState can change, given that they are dependent on their // attachments' ConfirmationState. if blks would only be issued on node 2 or 1, they weight would never surpass 50%. @@ -119,7 +119,7 @@ func TestSimpleDoubleSpend(t *testing.T) { ConfirmationState: confirmation.Rejected, Solid: tests.True(), }, - }, time.Minute, tests.Tick) + }, time.Minute*2, tests.Tick) } } diff --git a/tools/integration-tests/tester/tests/faucet/faucet_prepare_test.go b/tools/integration-tests/tester/tests/faucet/faucet_prepare_test.go deleted file mode 100644 index d909def5fa..0000000000 --- a/tools/integration-tests/tester/tests/faucet/faucet_prepare_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package faucet - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/iotaledger/goshimmer/packages/core/ledger/vm/devnetvm" - "github.com/iotaledger/goshimmer/tools/integration-tests/tester/framework" - "github.com/iotaledger/goshimmer/tools/integration-tests/tester/tests" -) - -// TestFaucetPrepare tests that the faucet prepares outputs to be consumed by faucet requests. 
-func TestFaucetPrepare(t *testing.T) { - ctx, cancel := tests.Context(context.Background(), t) - defer cancel() - snapshotInfo := tests.EqualSnapshotDetails - n, err := f.CreateNetwork(ctx, t.Name(), 4, framework.CreateNetworkConfig{ - StartSynced: true, - Faucet: true, - Activity: true, - PeerMaster: true, - Snapshot: snapshotInfo, - }, tests.CommonSnapshotConfigFunc(t, snapshotInfo)) - require.NoError(t, err) - defer tests.ShutdownNetwork(ctx, t, n) - - faucet, peer := n.Peers()[0], n.Peers()[1] - // use faucet parameters - var ( - genesisTokenBalance = faucet.Config().GenesisTokenAmount - supplyOutputsCount = faucet.Config().SupplyOutputsCount - splittingMultiplier = faucet.Config().SplittingMultiplier - tokensPerRequest = faucet.Config().TokensPerRequest - fundingOutputsAddrStart = tests.FaucetFundingOutputsAddrStart - lastFundingOutputAddr = supplyOutputsCount*splittingMultiplier + fundingOutputsAddrStart - 1 - ) - - faucet, nonFaucetPeers := n.Peers()[0], n.Peers()[1:] - - // check consensus mana: all nodes should have equal mana - require.Eventually(t, func() bool { - return tests.Mana(t, faucet).Consensus > 0 - }, tests.Timeout, tests.Tick) - require.EqualValues(t, snapshotInfo.GenesisTokenAmount, tests.Mana(t, faucet).Consensus) - - for i, peer := range nonFaucetPeers { - if snapshotInfo.PeersAmountsPledged[i] > 0 { - require.Eventually(t, func() bool { - return tests.Mana(t, peer).Consensus > 0 - }, tests.Timeout, tests.Tick) - } - require.EqualValues(t, snapshotInfo.PeersAmountsPledged[i], tests.Mana(t, peer).Consensus) - } - - // wait for the faucet to split the supply tx and prepare all outputs - tests.AwaitInitialFaucetOutputsPrepared(t, faucet, n.Peers()) - - // check that each of the supplyOutputsCount addresses holds the correct balance - remainderBalance := genesisTokenBalance - uint64(supplyOutputsCount*splittingMultiplier*tokensPerRequest) - require.EqualValues(t, remainderBalance, tests.Balance(t, faucet, faucet.Address(0), 
devnetvm.ColorIOTA)) - for i := fundingOutputsAddrStart; i <= lastFundingOutputAddr; i++ { - require.EqualValues(t, uint64(tokensPerRequest), tests.Balance(t, faucet, faucet.Address(i), devnetvm.ColorIOTA)) - } - - // consume all but one of the prepared outputs - for i := 1; i < supplyOutputsCount*splittingMultiplier; i++ { - tests.SendFaucetRequest(t, peer, peer.Address(i)) - } - - // wait for the peer to register a balance change - require.Eventually(t, func() bool { - return tests.Balance(t, peer, peer.Address(supplyOutputsCount*splittingMultiplier-1), devnetvm.ColorIOTA) > 0 - }, tests.Timeout, tests.Tick) - - // one prepared output is left from the first prepared batch, index is not known because outputs are not sorted by the index. - var balanceLeft uint64 = 0 - for i := fundingOutputsAddrStart; i <= lastFundingOutputAddr; i++ { - balanceLeft += tests.Balance(t, faucet, faucet.Address(i), devnetvm.ColorIOTA) - } - require.EqualValues(t, uint64(tokensPerRequest), balanceLeft) - - // check that more funds preparation has been triggered - // wait for the faucet to finish preparing new outputs - require.Eventually(t, func() bool { - resp, err := faucet.PostAddressUnspentOutputs([]string{faucet.Address(lastFundingOutputAddr + splittingMultiplier*supplyOutputsCount - 1).Base58()}) - require.NoError(t, err) - return len(resp.UnspentOutputs[0].Outputs) > 0 - }, tests.Timeout, tests.Tick) - - // check that each of the supplyOutputsCount addresses holds the correct balance - for i := lastFundingOutputAddr + 1; i <= lastFundingOutputAddr+splittingMultiplier*supplyOutputsCount; i++ { - require.EqualValues(t, uint64(tokensPerRequest), tests.Balance(t, faucet, faucet.Address(i), devnetvm.ColorIOTA)) - } - - // check that remainder has correct balance - remainderBalance -= uint64(supplyOutputsCount * tokensPerRequest * splittingMultiplier) - - require.EqualValues(t, remainderBalance, tests.Balance(t, faucet, faucet.Address(0), devnetvm.ColorIOTA)) -} diff --git 
a/tools/integration-tests/tester/tests/faucet/faucet_request_test.go b/tools/integration-tests/tester/tests/faucet/faucet_request_test.go index db5def1db6..65d87c74ea 100644 --- a/tools/integration-tests/tester/tests/faucet/faucet_request_test.go +++ b/tools/integration-tests/tester/tests/faucet/faucet_request_test.go @@ -48,8 +48,6 @@ func TestFaucetRequest(t *testing.T) { require.EqualValues(t, snapshotInfo.PeersAmountsPledged[i], tests.Mana(t, peer).Consensus) } - tests.AwaitInitialFaucetOutputsPrepared(t, faucet, n.Peers()) - // each non-faucet peer issues numRequests requests for _, peer := range nonFaucetPeers { for idx := 0; idx < numRequests; idx++ { diff --git a/tools/integration-tests/tester/tests/mana/mana_test.go b/tools/integration-tests/tester/tests/mana/mana_test.go index 806b521167..c5b013c7ba 100644 --- a/tools/integration-tests/tester/tests/mana/mana_test.go +++ b/tools/integration-tests/tester/tests/mana/mana_test.go @@ -155,7 +155,6 @@ func TestManaApis(t *testing.T) { peers := n.Peers() faucet := peers[0] - tests.AwaitInitialFaucetOutputsPrepared(t, faucet, n.Peers()) log.Println("Request mana from faucet...") // waiting for the faucet to have access mana require.Eventually(t, func() bool { diff --git a/tools/integration-tests/tester/tests/testutil.go b/tools/integration-tests/tester/tests/testutil.go index 1d468be69f..704a7d0bb4 100644 --- a/tools/integration-tests/tester/tests/testutil.go +++ b/tools/integration-tests/tester/tests/testutil.go @@ -11,7 +11,6 @@ import ( "github.com/iotaledger/hive.go/core/crypto/ed25519" "github.com/iotaledger/hive.go/core/generics/lo" "github.com/iotaledger/hive.go/core/identity" - "github.com/iotaledger/hive.go/core/types" "github.com/iotaledger/hive.go/core/types/confirmation" "github.com/mr-tron/base58" "github.com/stretchr/testify/require" @@ -34,8 +33,6 @@ const ( Tick = 500 * time.Millisecond shutdownGraceTime = time.Minute - - FaucetFundingOutputsAddrStart = 127 ) // EqualSnapshotDetails defines info 
for equally distributed consensus mana. @@ -133,36 +130,6 @@ func Mana(t *testing.T, node *framework.Node) jsonmodels.Mana { return info.Mana } -// AwaitInitialFaucetOutputsPrepared waits until the initial outputs are prepared by the faucet. -func AwaitInitialFaucetOutputsPrepared(t *testing.T, faucet *framework.Node, peers []*framework.Node) { - supplyOutputsCount := faucet.Config().SupplyOutputsCount - splittingMultiplier := faucet.Config().SplittingMultiplier - lastFundingOutputAddress := supplyOutputsCount*splittingMultiplier + FaucetFundingOutputsAddrStart - 1 - addrToCheck := faucet.Address(lastFundingOutputAddress).Base58() - - accepted := make(map[int]types.Empty) - require.Eventually(t, func() bool { - if len(accepted) == supplyOutputsCount*splittingMultiplier { - return true - } - // wait for confirmation of each fundingOutput - for fundingIndex := FaucetFundingOutputsAddrStart; fundingIndex <= lastFundingOutputAddress; fundingIndex++ { - if _, ok := accepted[fundingIndex]; !ok { - resp, err := faucet.PostAddressUnspentOutputs([]string{addrToCheck}) - require.NoError(t, err) - if len(resp.UnspentOutputs[0].Outputs) != 0 { - if resp.UnspentOutputs[0].Outputs[0].ConfirmationState.IsAccepted() { - accepted[fundingIndex] = types.Void - } - } - } - } - return false - }, time.Minute, Tick) - // give the faucet time to save the latest accepted output - time.Sleep(3 * time.Second) -} - // AddressUnspentOutputs returns the unspent outputs on address. func AddressUnspentOutputs(t *testing.T, node *framework.Node, address devnetvm.Address, numOfExpectedOuts int) []jsonmodels.WalletOutput { resp, err := node.PostAddressUnspentOutputs([]string{address.Base58()}) @@ -193,7 +160,7 @@ func Balance(t *testing.T, node *framework.Node, address devnetvm.Address, color // SendFaucetRequest sends a data block on a given peer and returns the id and a DataBlockSent struct. By default, // it pledges mana to the peer making the request. 
func SendFaucetRequest(t *testing.T, node *framework.Node, addr devnetvm.Address, manaPledgeIDs ...string) (string, DataBlockSent) { - nodeID := base58.Encode(node.ID().Bytes()) + nodeID := node.ID().EncodeBase58() aManaPledgeID, cManaPledgeID := nodeID, nodeID if len(manaPledgeIDs) > 1 { aManaPledgeID, cManaPledgeID = manaPledgeIDs[0], manaPledgeIDs[1] diff --git a/tools/integration-tests/tester/tests/value/value_test.go b/tools/integration-tests/tester/tests/value/value_test.go index 31455b8cab..ea7c36e68b 100644 --- a/tools/integration-tests/tester/tests/value/value_test.go +++ b/tools/integration-tests/tester/tests/value/value_test.go @@ -59,8 +59,6 @@ func TestValueTransactionPersistence(t *testing.T) { } tokensPerRequest := uint64(faucet.Config().Faucet.TokensPerRequest) - tests.AwaitInitialFaucetOutputsPrepared(t, faucet, n.Peers()) - addrBalance := make(map[string]map[devnetvm.Color]uint64) // request funds from faucet @@ -144,8 +142,6 @@ func TestValueAliasPersistence(t *testing.T) { require.EqualValues(t, snapshotInfo.PeersAmountsPledged[i], tests.Mana(t, peer).Consensus) } - tests.AwaitInitialFaucetOutputsPrepared(t, faucet, n.Peers()) - // create a wallet that connects to a random peer w := wallet.New(wallet.WebAPI(nonFaucetPeers[0].BaseURL()), wallet.FaucetPowDifficulty(faucet.Config().Faucet.PowDifficulty))