diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..d2f752a --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @initia-labs/core diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..4dbb73a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,24 @@ +--- +name: Bug report +about: Create a report to help us improve +title: "" +labels: "" +assignees: "" +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: + +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..bbcbbe7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 0000000..c722400 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,84 @@ +name: Lint +# This workflow is run on every pull request and push to master +# The `golangci` will pass without running if no *.{go, mod, sum} files have been changed. +on: + pull_request: + paths: + - "**.go" + - "go.mod" + - "go.sum" + push: + branches: + - main + - "release/*" + paths: + - "**.go" + - "go.mod" + - "go.sum" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + golangci: + env: + GOLANGCI_LINT_VERSION: v1.59.1 + name: golangci-lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: 1.22 + check-latest: true + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + **/**.go + go.mod + go.sum + # install golangci-lint + - run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION} + - name: run go linters (long) + if: env.GIT_DIFF + id: lint_long + run: | + make lint + env: + GIT_DIFF: ${{ env.GIT_DIFF }} + - uses: technote-space/get-diff-action@v6.1.2 + if: steps.lint_long.outcome == 'skipped' + with: + PATTERNS: | + **/**.go + go.mod + go.sum + - name: run go linters (short) + if: steps.lint_long.outcome == 'skipped' && env.GIT_DIFF + run: | + make lint + env: + GIT_DIFF: ${{ env.GIT_DIFF }} + LINT_DIFF: 1 + # Use --check or --exit-code when available (Go 1.19?) 
+ # https://github.com/golang/go/issues/27005 + tidy: + runs-on: ubuntu-latest + name: tidy + steps: + - uses: actions/checkout@v4 + - name: Setup go + uses: actions/setup-go@v5 + with: + go-version: 1.22 + check-latest: true + - run: | + go mod tidy + CHANGES_IN_REPO=$(git status --porcelain) + if [[ -n "$CHANGES_IN_REPO" ]]; then + echo "Repository is dirty. Showing 'git status' and 'git --no-pager diff' for debugging now:" + git status && git --no-pager diff + exit 1 + fi diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..8c8c105 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,42 @@ +name: Build & Test +on: + pull_request: + paths: + - "**.go" + push: + branches: + - main + - "release/*" + paths: + - "**.go" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + test: + name: Run test + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v3 + with: + go-version: 1.22 + - name: Install openssl + run: sudo apt-get install libssl-dev + - uses: actions/checkout@v3 + - uses: technote-space/get-diff-action@v4 + with: + PATTERNS: | + **/**.go + go.mod + go.sum + - name: build + run: | + make build + - name: test + if: env.GIT_DIFF + run: | + go test ./... -mod=readonly -timeout 12m + env: + GIT_DIFF: ${{ env.GIT_DIFF }} diff --git a/.gitignore b/.gitignore index ce6f7f6..198429d 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,15 @@ -executor.json \ No newline at end of file +executor.json + +# Go specific +*.exe +*.dll +*.so +*.dylib +*.test +*.out + +# Build output +build/ + +# Dependency directories +vendor/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..548b880 --- /dev/null +++ b/LICENSE @@ -0,0 +1,109 @@ +Business Source License 1.1 + +License text copyright © 2023 MariaDB plc, All Rights Reserved. +“Business Source License” is a trademark of MariaDB plc. + +----------------------------------------------------------------------------- + +Parameters + +Licensor: Initia Foundation + +Licensed Work: Initia + The Licensed Work is (c) 2023 Initia Foundation + +Additional Use Grant: None + +Change Date: 2028-01-01 + +Change License: GNU Lesser General Public License v3.0 or later + +For information about alternative licensing arrangements for the Software, +please visit: https://mariadb.com/products/mariadb-enterprise + +----------------------------------------------------------------------------- + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. 
This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +----------------------------------------------------------------------------- + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. + +----------------------------------------------------------------------------- + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +For more information on the use of the Business Source License for MariaDB +products, please visit the MariaDB Business Source License FAQ at +https://mariadb.com/bsl-faq-mariadb. + +For more information on the use of the Business Source License generally, +please visit the Adopting and Developing Business Source License FAQ at +https://mariadb.com/bsl-faq-adopting. \ No newline at end of file diff --git a/LICENSE.header b/LICENSE.header new file mode 100644 index 0000000..2815f23 --- /dev/null +++ b/LICENSE.header @@ -0,0 +1,19 @@ +SPDX-License-Identifier: BUSL-1.1 + +Copyright (C) 2023, Initia Foundation. All rights reserved. +Use of this software is govered by the Business Source License included +in the LICENSE file of this repository and at www.mariadb.com/bsl11. 
+ +ANY USE OF THE LICENSED WORK IN VIOLATION OF THIS LICENSE WILL AUTOMATICALLY +TERMINATE YOUR RIGHTS UNDER THIS LICENSE FOR THE CURRENT AND ALL OTHER +VERSIONS OF THE LICENSED WORK. + +THIS LICENSE DOES NOT GRANT YOU ANY RIGHT IN ANY TRADEMARK OR LOGO OF +LICENSOR OR ITS AFFILIATES (PROVIDED THAT YOU MAY USE A TRADEMARK OR LOGO OF +LICENSOR AS EXPRESSLY REQUIRED BY THIS LICENSE). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..9010ada --- /dev/null +++ b/Makefile @@ -0,0 +1,108 @@ +#!/usr/bin/make -f + +BRANCH := $(shell git rev-parse --abbrev-ref HEAD) +COMMIT := $(shell git log -1 --format='%H') +BUILDDIR ?= $(CURDIR)/build + +# don't override user values +ifeq (,$(VERSION)) + VERSION := $(shell git describe --tags) + # if VERSION is empty, then populate it with branch's name and raw commit hash + ifeq (,$(VERSION)) + VERSION := $(BRANCH)-$(COMMIT) + endif +endif + +export GO111MODULE = on + +# Build target +BINARY_NAME = opinitd + +build_tags = netgo +build_tags += $(BUILD_TAGS) +build_tags := $(strip $(build_tags)) + +ldflags = -X github.com/initia-labs/opinit-bots-go/version.Version=$(VERSION) \ + -X github.com/initia-labs/opinit-bots-go/version.GitCommit=$(COMMIT) + +ldflags += $(LDFLAGS) +ldflags := $(strip $(ldflags)) +ldflags += -w -s + +BUILD_FLAGS := -tags "$(build_tags)" -ldflags '$(ldflags)' + +all: install test + +build: go.sum +ifeq ($(OS),Windows_NT) + exit 1 +else + go build -mod=readonly $(BUILD_FLAGS) -o build/$(BINARY_NAME) ./cmd/$(BINARY_NAME) +endif + +install: go.sum + go install -mod=readonly $(BUILD_FLAGS) ./cmd/$(BINARY_NAME) + +.PHONY: build install + +######################################## +### Tools & dependencies + +go-mod-cache: go.sum + @echo "--> Download go modules to local cache" + @go mod download + +go.sum: go.mod + @echo "--> Ensure dependencies have not been modified" + @go mod verify + +draw-deps: + @# requires brew install graphviz or apt-get install graphviz + go install github.com/RobotsAndPencils/goviz + @goviz -i ./cmd/initiad -d 2 | dot -Tpng -o dependency-graph.png + +clean: + rm -rf \ + $(BUILDDIR)/ + +.PHONY: clean + +############################################################################### +### Linting ### +############################################################################### + +lint: + golangci-lint run --out-format=tab --timeout=15m + +lint-fix: + golangci-lint run --fix --out-format=tab --timeout=15m +.PHONY: lint lint-fix + +format: + find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' | xargs gofmt -w -s + find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' | xargs misspell -w + find . 
-name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' | xargs goimports -w -local github.com/cosmos/cosmos-sdk +.PHONY: format + + +############################################################################### +### Tests +############################################################################### + +test: test-unit + +test-all: test-unit test-race test-cover + +test-unit: + @VERSION=$(VERSION) go test -mod=readonly -tags='ledger test_ledger_mock' ./... + +test-race: + @VERSION=$(VERSION) go test -mod=readonly -race -tags='ledger test_ledger_mock' ./... + +test-cover: + @go test -mod=readonly -timeout 30m -race -coverprofile=coverage.txt -covermode=atomic -tags='ledger test_ledger_mock' ./... + +benchmark: + @go test -timeout 20m -mod=readonly -bench=. ./... + +.PHONY: test test-all test-cover test-unit test-race benchmark diff --git a/README.md b/README.md index eebfaa1..b9bc3ca 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,48 @@ -# OPINIT-BOTS Go version +# OPinit Bots Go -`go run main.go start executor --config=./executor.json` \ No newline at end of file +This repository contains the Go implementation of OPinit bots. + +## Components + +- [Executor](./executor) +- Batch Submitter +- Challenger + +## How to Use + +### Prerequisites + +Before running OPinit bots, make sure you have the following prerequisites installed: + +- Go 1.22.2+ +- Node RPC (L1 and L2) + +To ensure compatibility with the node version, check the following versions: + +| L1 Node | MiniMove | MiniWasm | MiniEVM | OPinit-bots | +| ------- | -------- | -------- | ------- | ----------- | +| v0.4.0 | v0.4.0 | v0.4.0 | v0.4.0 | v0.1.0 | + +### Build and Configure + +To build and configure the bots, follow these steps: + +```bash +make install + +# Default config path is ~/.opinit/[bot-name].json +# - Customize home dir with --home ~/.opinit-custom-path +# - Customize config name with --config [bot-custom-name].json +# +# Supported bot names +# - executor +opinitd init [bot-name] +``` + +### Start Bot Program + +To start the bot program, use the following command: + +```bash +opinitd start [bot-name] +``` diff --git a/bot/bot.go b/bot/bot.go index 709876c..7cd0978 100644 --- a/bot/bot.go +++ b/bot/bot.go @@ -6,10 +6,14 @@ import ( "fmt" "io" "os" + "path" + + "go.uber.org/zap" "github.com/cosmos/cosmos-sdk/std" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/x/auth" + "github.com/initia-labs/OPinit/x/opchild" "github.com/initia-labs/OPinit/x/ophost" initiaapp "github.com/initia-labs/initia/app" @@ -19,7 +23,6 @@ import ( "github.com/initia-labs/opinit-bots-go/executor" executortypes "github.com/initia-labs/opinit-bots-go/executor/types" "github.com/initia-labs/opinit-bots-go/server" - "go.uber.org/zap" ) func LoadJsonConfig(path string, config bottypes.Config) error { @@ -40,7 +43,7 @@ func LoadJsonConfig(path string, config bottypes.Config) error { return nil } -func NewBot(name string, logger *zap.Logger, homePath string, configPath string) (bottypes.Bot, error) { +func NewBot(name bottypes.BotType, logger *zap.Logger, homePath string, configName string) (bottypes.Bot, error) { SetSDKConfig() encodingConfig := params.MakeEncodingConfig() @@ -58,8 +61,10 @@ func NewBot(name string, logger *zap.Logger, homePath string, configPath string) opchild.AppModuleBasic{}.RegisterLegacyAminoCodec(encodingConfig.Amino) switch name { - case bottypes.ExecutorName: + case bottypes.BotTypeExecutor: cfg 
:= &executortypes.Config{} + + configPath := path.Join(homePath, configName) err := LoadJsonConfig(configPath, cfg) if err != nil { return nil, err @@ -92,6 +97,6 @@ func SetSDKConfig() { sdkConfig.Seal() } -func getDBPath(homePath string, botName string) string { +func getDBPath(homePath string, botName bottypes.BotType) string { return fmt.Sprintf(homePath+"/%s.db", botName) } diff --git a/bot/types/const.go b/bot/types/const.go index dae345d..c3c1319 100644 --- a/bot/types/const.go +++ b/bot/types/const.go @@ -1,3 +1,16 @@ package types -const ExecutorName = "executor" +type BotType string + +const ( + BotTypeExecutor BotType = "executor" +) + +func BotTypeFromString(name string) BotType { + switch name { + case "executor": + return BotTypeExecutor + } + + panic("unknown bot type") +} diff --git a/cmd/flags.go b/cmd/flags.go deleted file mode 100644 index c3176ac..0000000 --- a/cmd/flags.go +++ /dev/null @@ -1,21 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - flagHome = "home" - flagConfigPath = "config" - flagExecutorKeyName = "executor" -) - -func configFlag(v *viper.Viper, cmd *cobra.Command) *cobra.Command { - cmd.Flags().StringP(flagConfigPath, "c", "", "config file path") - if err := v.BindPFlag(flagConfigPath, cmd.Flags().Lookup(flagConfigPath)); err != nil { - panic(err) - } - - return cmd -} diff --git a/cmd/context.go b/cmd/opinitd/context.go similarity index 91% rename from cmd/context.go rename to cmd/opinitd/context.go index 83c2c50..23d330a 100644 --- a/cmd/context.go +++ b/cmd/opinitd/context.go @@ -1,4 +1,4 @@ -package cmd +package main import ( "github.com/spf13/viper" diff --git a/cmd/opinitd/flags.go b/cmd/opinitd/flags.go new file mode 100644 index 0000000..8a7e4fb --- /dev/null +++ b/cmd/opinitd/flags.go @@ -0,0 +1,26 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +const ( + flagHome = "home" + flagConfigName = "config" + flagExecutorKeyName = "executor" +) + +var defaultHome = filepath.Join(os.Getenv("HOME"), ".opinit") + +func configFlag(v *viper.Viper, cmd *cobra.Command) *cobra.Command { + cmd.Flags().StringP(flagConfigName, "c", "executor.json", "The name of the configuration file in the home directory. Must have json extension.") + if err := v.BindPFlag(flagConfigName, cmd.Flags().Lookup(flagConfigName)); err != nil { + panic(err) + } + + return cmd +} diff --git a/cmd/opinitd/init.go b/cmd/opinitd/init.go new file mode 100644 index 0000000..4bc4d79 --- /dev/null +++ b/cmd/opinitd/init.go @@ -0,0 +1,63 @@ +package main + +import ( + "encoding/json" + "errors" + "os" + "path" + + "github.com/spf13/cobra" + + bottypes "github.com/initia-labs/opinit-bots-go/bot/types" + executortypes "github.com/initia-labs/opinit-bots-go/executor/types" +) + +func initCmd(ctx *cmdContext) *cobra.Command { + cmd := &cobra.Command{ + Use: "init [bot-name]", + Args: cobra.ExactArgs(1), + Short: "Initialize a bot's configuration files.", + Long: `Initialize a bot's configuration files. 
+ +Currently supported bots are: executor +`, + RunE: func(cmd *cobra.Command, args []string) error { + configName, err := cmd.Flags().GetString(flagConfigName) + if err != nil { + return err + } + + configPath := path.Join(ctx.homePath, configName) + if path.Ext(configPath) != ".json" { + return errors.New("config file must be a json file") + } + + botType := bottypes.BotTypeFromString(args[0]) + switch botType { + case bottypes.BotTypeExecutor: + if err := os.MkdirAll(ctx.homePath, os.ModePerm); err != nil { + return err + } + + f, err := os.Create(configPath) + if err != nil { + return err + } + + bz, err := json.MarshalIndent(executortypes.DefaultConfig(), "", " ") + if err != nil { + return err + } + + if _, err := f.Write(bz); err != nil { + return err + } + } + + return nil + }, + } + + cmd = configFlag(ctx.v, cmd) + return cmd +} diff --git a/cmd/opinitd/main.go b/cmd/opinitd/main.go new file mode 100644 index 0000000..57f94f8 --- /dev/null +++ b/cmd/opinitd/main.go @@ -0,0 +1,18 @@ +package main + +import ( + "fmt" + "os" +) + +// TODO: use cmd package to build and run the bot +// just test the bot with this main function + +func main() { + rootCmd := NewRootCmd() + + if err := rootCmd.Execute(); err != nil { + fmt.Fprintln(rootCmd.OutOrStderr(), err) + os.Exit(1) + } +} diff --git a/cmd/root.go b/cmd/opinitd/root.go similarity index 93% rename from cmd/root.go rename to cmd/opinitd/root.go index 1e5e994..1ff2176 100644 --- a/cmd/root.go +++ b/cmd/opinitd/root.go @@ -1,16 +1,14 @@ -package cmd +package main import ( - "os" - "path/filepath" - "github.com/spf13/cobra" "github.com/spf13/viper" + "go.uber.org/zap" "go.uber.org/zap/zapcore" -) -var defaultHome = filepath.Join(os.Getenv("HOME"), ".opinit") + "github.com/initia-labs/opinit-bots-go/version" +) func NewRootCmd() *cobra.Command { ctx := &cmdContext{ @@ -44,8 +42,11 @@ func NewRootCmd() *cobra.Command { } rootCmd.AddCommand( + initCmd(ctx), startCmd(ctx), + version.NewVersionCommand(), ) + return rootCmd } diff --git a/cmd/opinitd/start.go b/cmd/opinitd/start.go new file mode 100644 index 0000000..4eaa82d --- /dev/null +++ b/cmd/opinitd/start.go @@ -0,0 +1,55 @@ +package main + +import ( + "context" + "os" + "os/signal" + "syscall" + + "github.com/spf13/cobra" + + "github.com/initia-labs/opinit-bots-go/bot" + bottypes "github.com/initia-labs/opinit-bots-go/bot/types" +) + +func startCmd(ctx *cmdContext) *cobra.Command { + cmd := &cobra.Command{ + Use: "start [bot-name]", + Args: cobra.ExactArgs(1), + Short: "Start a bot with the given name", + Long: `Start a bot with the given name. 
+ +Currently supported bots: +- executor +`, + RunE: func(cmd *cobra.Command, args []string) error { + configName, err := cmd.Flags().GetString(flagConfigName) + if err != nil { + return err + } + + botType := bottypes.BotTypeFromString(args[0]) + bot, err := bot.NewBot(botType, ctx.logger, ctx.homePath, configName) + if err != nil { + return err + } + + cmdCtx, botDone := context.WithCancel(cmd.Context()) + gracefulShutdown(botDone) + + return bot.Start(cmdCtx) + }, + } + + cmd = configFlag(ctx.v, cmd) + return cmd +} + +func gracefulShutdown(done context.CancelFunc) { + signalChannel := make(chan os.Signal, 2) + signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM) + go func() { + <-signalChannel + done() + }() +} diff --git a/cmd/start.go b/cmd/start.go deleted file mode 100644 index dadc03f..0000000 --- a/cmd/start.go +++ /dev/null @@ -1,29 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" - - bot "github.com/initia-labs/opinit-bots-go/bot" -) - -func startCmd(ctx *cmdContext) *cobra.Command { - cmd := &cobra.Command{ - Use: "start [bot-name]", - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - configPath, err := cmd.Flags().GetString(flagConfigPath) - if err != nil { - return err - } - - bot, err := bot.NewBot(args[0], ctx.logger, ctx.homePath, configPath) - if err != nil { - return err - } - return bot.Start(cmd.Context()) - }, - } - - cmd = configFlag(ctx.v, cmd) - return cmd -} diff --git a/db/db.go b/db/db.go index 9095a26..f67cd93 100644 --- a/db/db.go +++ b/db/db.go @@ -1,10 +1,13 @@ package db import ( - dbtypes "github.com/initia-labs/opinit-bots-go/db/types" - "github.com/initia-labs/opinit-bots-go/types" + "bytes" + "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/util" + + dbtypes "github.com/initia-labs/opinit-bots-go/db/types" + "github.com/initia-labs/opinit-bots-go/types" ) var _ types.DB = (*LevelDB)(nil) @@ -27,7 +30,10 @@ func NewDB(path string) (types.DB, error) { }, nil } -func (db *LevelDB) RawBatchSet(kvs ...types.KV) error { +// RawBatchSet sets the key-value pairs in the database without prefixing the keys. +// +// @dev: `LevelDB.prefix“ is not used as the prefix for the keys. +func (db *LevelDB) RawBatchSet(kvs ...types.RawKV) error { if len(kvs) == 0 { return nil } @@ -42,6 +48,7 @@ func (db *LevelDB) RawBatchSet(kvs ...types.KV) error { return db.db.Write(batch, nil) } +// BatchSet sets the key-value pairs in the database with prefixing the keys. func (db *LevelDB) BatchSet(kvs ...types.KV) error { if len(kvs) == 0 { return nil @@ -57,27 +64,36 @@ func (db *LevelDB) BatchSet(kvs ...types.KV) error { return db.db.Write(batch, nil) } +// Set sets the key-value pair in the database with prefixing the key. func (db *LevelDB) Set(key []byte, value []byte) error { return db.db.Put(db.PrefixedKey(key), value, nil) } +// Get gets the value of the key in the database with prefixing the key. func (db *LevelDB) Get(key []byte) ([]byte, error) { return db.db.Get(db.PrefixedKey(key), nil) } +// Delete deletes the key in the database with prefixing the key. func (db *LevelDB) Delete(key []byte) error { return db.db.Delete(db.PrefixedKey(key), nil) } +// Close closes the database. func (db *LevelDB) Close() error { return db.db.Close() } -func (db *LevelDB) PrefixedIterate(prefix []byte, cb func(key, value []byte) (stop bool)) error { +// PrefixedIterate iterates over the key-value pairs in the database with prefixing the keys. 
+// +// @dev: `LevelDB.prefix + prefix` is used as the prefix for the iteration. +func (db *LevelDB) PrefixedIterate(prefix []byte, cb func(key, value []byte) (stop bool, err error)) error { iter := db.db.NewIterator(util.BytesPrefix(db.PrefixedKey(prefix)), nil) for iter.Next() { key := db.UnprefixedKey(iter.Key()) - if cb(key, iter.Value()) { + if stop, err := cb(key, iter.Value()); err != nil { + return err + } else if stop { break } } @@ -85,6 +101,9 @@ func (db *LevelDB) PrefixedIterate(prefix []byte, cb func(key, value []byte) (st return iter.Error() } +// SeekPrevInclusiveKey seeks the previous key-value pair in the database with prefixing the keys. +// +// @dev: `LevelDB.prefix + prefix` is used as the prefix for the iteration. func (db *LevelDB) SeekPrevInclusiveKey(prefix []byte, key []byte) (k []byte, v []byte, err error) { iter := db.db.NewIterator(util.BytesPrefix(db.PrefixedKey(prefix)), nil) if iter.Seek(db.PrefixedKey(key)) || iter.Valid() && iter.Prev() || iter.Last() && iter.Valid() { @@ -100,6 +119,7 @@ func (db *LevelDB) SeekPrevInclusiveKey(prefix []byte, key []byte) (k []byte, v return k, v, err } +// WithPrefix returns a new LevelDB with the given prefix. func (db *LevelDB) WithPrefix(prefix []byte) types.DB { return &LevelDB{ db: db.db, @@ -107,12 +127,15 @@ func (db *LevelDB) WithPrefix(prefix []byte) types.DB { } } +// PrefixedKey prefixes the key with the LevelDB.prefix. func (db LevelDB) PrefixedKey(key []byte) []byte { - return append(append(db.prefix, []byte("/")...), key...) + return append(append(db.prefix, dbtypes.Splitter), key...) } +// UnprefixedKey remove the prefix from the key, only +// if the key has the prefix. func (db LevelDB) UnprefixedKey(key []byte) []byte { - return key[len(db.prefix)+1:] + return bytes.TrimPrefix(key, append(db.prefix, dbtypes.Splitter)) } func (db LevelDB) GetPath() string { diff --git a/db/types/utils.go b/db/types/utils.go index 79cd723..11e7003 100644 --- a/db/types/utils.go +++ b/db/types/utils.go @@ -25,13 +25,13 @@ func FromUint64(v uint64) []byte { return []byte(fmt.Sprintf("%d", v)) } -func ToUint64(v []byte) uint64 { +func ToUint64(v []byte) (uint64, error) { data, err := strconv.ParseUint(string(v), 10, 64) if err != nil { - // must not happen - panic(err) + return 0, fmt.Errorf("failed to parse uint64 from %s: %w", string(v), err) } - return data + + return data, nil } func FromUint64Key(v uint64) []byte { diff --git a/executor/README.md b/executor/README.md new file mode 100644 index 0000000..43d525d --- /dev/null +++ b/executor/README.md @@ -0,0 +1,41 @@ +# Executor + +The Executor is responsible for relaying L1 token deposit transactions to L2 and periodically submitting L2 output roots to L1. + +## Config + +To configure the Executor, fill in the values in the `~/.opinit/executor.json` file. + +```json +{ + // Version is the version used to build output root. + "version": 1, + + // ListenAddress is the address to listen for incoming requests. + "listen_address": "tcp://localhost:3000", + + "l1_rpc_address": "tcp://localhost:26657", + "l2_rpc_address": "tcp://localhost:27657", + + "l1_gas_price": "0.15uinit", + "l2_gas_price": "", + + "l1_chain_id": "testnet-l1-1", + "l2_chain_id": "testnet-l2-1", + + // OutputSubmitterMnemonic is the mnemonic phrase for the output submitter, + // which is used to relay the output transaction from l2 to l1. + // + // If you don't want to use the output submitter feature, you can leave it empty. 
+ "output_submitter_mnemonic": "", + + // BridgeExecutorMnemonic is the mnemonic phrase for the bridge executor, + // which is used to relay initiate token bridge transaction from l1 to l2. + // + // If you don't want to use the bridge executor feature, you can leave it empty. + "bridge_executor_mnemonic": "", + + // RelayOracle is the flag to enable the oracle relay feature. + "relay_oracle": false +} +``` diff --git a/executor/child/child.go b/executor/child/child.go index 1c851ce..ed13ae6 100644 --- a/executor/child/child.go +++ b/executor/child/child.go @@ -2,6 +2,7 @@ package child import ( "context" + "fmt" "io" "time" @@ -27,7 +28,7 @@ type hostNode interface { AccountCodec() address.Codec HasKey() bool BroadcastMsgs(nodetypes.ProcessedMsgs) - RawKVProcessedData([]nodetypes.ProcessedMsgs, bool) ([]types.KV, error) + ProcessedMsgsToRawKV([]nodetypes.ProcessedMsgs, bool) ([]types.RawKV, error) QueryLastOutput() (*ophosttypes.QueryOutputProposalResponse, error) QueryOutput(uint64) (*ophosttypes.QueryOutputProposalResponse, error) } @@ -63,13 +64,19 @@ type Child struct { msgQueue []sdk.Msg } -func NewChild(version uint8, cfg nodetypes.NodeConfig, db types.DB, logger *zap.Logger, cdc codec.Codec, txConfig client.TxConfig) *Child { +func NewChild( + version uint8, cfg nodetypes.NodeConfig, + db types.DB, logger *zap.Logger, cdc codec.Codec, txConfig client.TxConfig, +) *Child { node, err := node.NewNode(cfg, db, logger, cdc, txConfig) if err != nil { panic(err) } - mk := merkle.NewMerkle(db.WithPrefix([]byte(executortypes.MerkleName)), ophosttypes.GenerateNodeHash) + mk, err := merkle.NewMerkle(db.WithPrefix([]byte(executortypes.MerkleName)), ophosttypes.GenerateNodeHash) + if err != nil { + panic(err) + } ch := &Child{ version: version, @@ -100,8 +107,15 @@ func (ch *Child) Initialize(host hostNode, bridgeInfo opchildtypes.BridgeInfo) e return nil } -func (ch *Child) Start(ctx context.Context) { - ch.node.Start(ctx, nodetypes.PROCESS_TYPE_DEFAULT) +func (ch *Child) Start(ctx context.Context, errCh chan error) { + defer func() { + if r := recover(); r != nil { + ch.logger.Error("child panic", zap.Any("recover", r)) + errCh <- fmt.Errorf("child panic: %v", r) + } + }() + + ch.node.Start(ctx, errCh) } func (ch *Child) registerHandlers() { @@ -116,11 +130,12 @@ func (ch Child) BroadcastMsgs(msgs nodetypes.ProcessedMsgs) { if !ch.node.HasKey() { return } + ch.node.BroadcastMsgs(msgs) } -func (ch Child) RawKVProcessedData(msgs []nodetypes.ProcessedMsgs, delete bool) ([]types.KV, error) { - return ch.node.RawKVProcessedData(msgs, delete) +func (ch Child) ProcessedMsgsToRawKV(msgs []nodetypes.ProcessedMsgs, delete bool) ([]types.RawKV, error) { + return ch.node.ProcessedMsgsToRawKV(msgs, delete) } func (ch Child) BridgeId() uint64 { @@ -137,3 +152,7 @@ func (ch Child) HasKey() bool { func (ch *Child) SetBridgeInfo(bridgeInfo opchildtypes.BridgeInfo) { ch.bridgeInfo = bridgeInfo } + +func (ch Child) GetHeight() uint64 { + return ch.node.GetHeight() +} diff --git a/executor/child/deposit.go b/executor/child/deposit.go index c9d14fd..3ac31c7 100644 --- a/executor/child/deposit.go +++ b/executor/child/deposit.go @@ -1,7 +1,7 @@ package child import ( - "errors" + "fmt" "strconv" "cosmossdk.io/math" @@ -36,8 +36,9 @@ func (ch *Child) finalizeDepositHandler(args nodetypes.EventHandlerArgs) error { case opchildtypes.AttributeKeyAmount: coinAmount, ok := math.NewIntFromString(attr.Value) if !ok { - return errors.New("invalid amount") + return fmt.Errorf("invalid amount %s", attr.Value) } + 
amount.Amount = coinAmount case opchildtypes.AttributeKeyFinalizeHeight: l1BlockHeight, err = strconv.ParseUint(attr.Value, 10, 64) diff --git a/executor/child/handler.go b/executor/child/handler.go index 8a3f91e..0f64670 100644 --- a/executor/child/handler.go +++ b/executor/child/handler.go @@ -18,6 +18,7 @@ func (ch *Child) beginBlockHandler(args nodetypes.BeginBlockArgs) (err error) { if err != nil { return err } + err = ch.prepareOutput() if err != nil { return err @@ -27,11 +28,12 @@ func (ch *Child) beginBlockHandler(args nodetypes.BeginBlockArgs) (err error) { func (ch *Child) endBlockHandler(args nodetypes.EndBlockArgs) error { blockHeight := uint64(args.Block.Header.Height) - batchKVs := make([]types.KV, 0) + batchKVs := make([]types.RawKV, 0) treeKVs, storageRoot, err := ch.handleTree(blockHeight, uint64(args.LatestHeight), args.BlockID, args.Block.Header) if err != nil { return err } + batchKVs = append(batchKVs, treeKVs...) if storageRoot != nil { @@ -40,13 +42,17 @@ func (ch *Child) endBlockHandler(args nodetypes.EndBlockArgs) error { return err } } - // collect more msgs if block height is not latest - if blockHeight != args.LatestHeight && len(ch.msgQueue) > 0 && len(ch.msgQueue) <= 10 { + + // if we are in sync and we have a small number of messages, less than 10, + // then store the current updates in the database and process the next block. + if blockHeight < args.LatestHeight && len(ch.msgQueue) > 0 && len(ch.msgQueue) <= 10 { return ch.db.RawBatchSet(batchKVs...) } - batchKVs = append(batchKVs, ch.node.RawKVSyncInfo(blockHeight)) + // update the sync info + batchKVs = append(batchKVs, ch.node.SyncInfoToRawKV(blockHeight)) + // if has key, then process the messages if ch.host.HasKey() { if len(ch.msgQueue) != 0 { ch.processedMsgs = append(ch.processedMsgs, nodetypes.ProcessedMsgs{ @@ -55,11 +61,11 @@ func (ch *Child) endBlockHandler(args nodetypes.EndBlockArgs) error { Save: true, }) } - msgkvs, err := ch.host.RawKVProcessedData(ch.processedMsgs, false) + msgKVs, err := ch.host.ProcessedMsgsToRawKV(ch.processedMsgs, false) if err != nil { return err } - batchKVs = append(batchKVs, msgkvs...) + batchKVs = append(batchKVs, msgKVs...) } err = ch.db.RawBatchSet(batchKVs...) 
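The endBlockHandler change above collects the tree updates, the sync info, and the processed messages into one `[]types.RawKV` slice and flushes them with a single `ch.db.RawBatchSet(batchKVs...)` call, relying on the prefixed/raw key handling documented in db/db.go earlier in this patch. Below is a minimal, self-contained sketch of that atomic batch-write pattern on bare goleveldb. It is an illustration only: `kv`, `prefixed`, `batchSet`, the `'/'` splitter byte, and the `"child"` / `"synced_height"` keys are assumptions for the example, standing in for the repository's `types.RawKV`, `dbtypes.Splitter`, and `LevelDB.PrefixedKey`.

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// kv mirrors the shape of a key-value pair collected while handling a block.
type kv struct{ Key, Value []byte }

// prefixed joins a store prefix and a key with a '/' splitter byte,
// mimicking the PrefixedKey helper described in db/db.go (assumed splitter).
func prefixed(prefix, key []byte) []byte {
	out := append([]byte{}, prefix...)
	out = append(out, '/')
	return append(out, key...)
}

// batchSet writes all pairs in one atomic leveldb batch, the same pattern
// the end-block handler uses when it calls RawBatchSet.
func batchSet(db *leveldb.DB, prefix []byte, kvs ...kv) error {
	batch := new(leveldb.Batch)
	for _, p := range kvs {
		batch.Put(prefixed(prefix, p.Key), p.Value)
	}
	return db.Write(batch, nil)
}

func main() {
	db, err := leveldb.OpenFile("example.db", nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Store a synced height and a queued message under the "child" store
	// in a single batch, so both are persisted together or not at all.
	err = batchSet(db, []byte("child"),
		kv{[]byte("synced_height"), []byte("42")},
		kv{[]byte("msg/0"), []byte("queued message bytes")},
	)
	if err != nil {
		panic(err)
	}

	v, _ := db.Get([]byte("child/synced_height"), nil)
	fmt.Println(string(v)) // 42
}
```

Writing every per-block update in one batch keeps the recorded sync height consistent with the queued messages even if the process stops mid-block, which is why the handler accumulates `batchKVs` and commits them once at the end.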
diff --git a/executor/child/oracle.go b/executor/child/oracle.go index 74627cd..87b42a2 100644 --- a/executor/child/oracle.go +++ b/executor/child/oracle.go @@ -25,7 +25,9 @@ func (ch *Child) updateOracleHandler(args nodetypes.EventHandlerArgs) error { from = attr.Value } } + ch.handleUpdateOracle(l1BlockHeight, from) + return nil } diff --git a/executor/child/query.go b/executor/child/query.go index a3ac574..87f1bd5 100644 --- a/executor/child/query.go +++ b/executor/child/query.go @@ -21,7 +21,9 @@ func (ch Child) GetAddressStr() (string, error) { func (ch Child) QueryBridgeInfo() (opchildtypes.BridgeInfo, error) { req := &opchildtypes.QueryBridgeInfoRequest{} - ctx := node.GetQueryContext(0) + ctx, cancel := node.GetQueryContext(0) + defer cancel() + res, err := ch.opchildQueryClient.BridgeInfo(ctx, req) if err != nil { return opchildtypes.BridgeInfo{}, err @@ -31,7 +33,9 @@ func (ch Child) QueryBridgeInfo() (opchildtypes.BridgeInfo, error) { func (ch Child) QueryNextL1Sequence() (uint64, error) { req := &opchildtypes.QueryNextL1SequenceRequest{} - ctx := node.GetQueryContext(0) + ctx, cancel := node.GetQueryContext(0) + defer cancel() + res, err := ch.opchildQueryClient.NextL1Sequence(ctx, req) if err != nil { return 0, err @@ -41,7 +45,9 @@ func (ch Child) QueryNextL1Sequence() (uint64, error) { func (ch Child) QueryNextL2Sequence(height uint64) (uint64, error) { req := &opchildtypes.QueryNextL2SequenceRequest{} - ctx := node.GetQueryContext(height) + ctx, cancel := node.GetQueryContext(height) + defer cancel() + res, err := ch.opchildQueryClient.NextL2Sequence(ctx, req) if err != nil { return 0, err @@ -59,6 +65,7 @@ func (ch Child) QueryWithdrawal(sequence uint64) (executortypes.QueryWithdrawalR if err != nil { return executortypes.QueryWithdrawalResponse{}, err } + amount := sdk.NewCoin(withdrawal.BaseDenom, math.NewIntFromUint64(withdrawal.Amount)) treeExtraData := executortypes.TreeExtraData{} @@ -77,9 +84,8 @@ func (ch Child) QueryWithdrawal(sequence uint64) (executortypes.QueryWithdrawalR Version: []byte{ch.version}, StorageRoot: outputRoot, LatestBlockHash: treeExtraData.BlockHash, - - BlockNumber: treeExtraData.BlockNumber, - Receiver: withdrawal.To, - WithdrawalHash: withdrawal.WithdrawalHash, + BlockNumber: treeExtraData.BlockNumber, + Receiver: withdrawal.To, + WithdrawalHash: withdrawal.WithdrawalHash, }, nil } diff --git a/executor/child/withdraw.go b/executor/child/withdraw.go index 45eba2e..a8a4d8e 100644 --- a/executor/child/withdraw.go +++ b/executor/child/withdraw.go @@ -3,7 +3,6 @@ package child import ( "encoding/base64" "encoding/json" - "errors" "fmt" "strconv" "strings" @@ -41,11 +40,13 @@ func (ch *Child) initiateWithdrawalHandler(args nodetypes.EventHandlerArgs) erro case opchildtypes.AttributeKeyAmount: coinAmount, ok := math.NewIntFromString(attr.Value) if !ok { - return errors.New("invalid amount") + return fmt.Errorf("invalid amount %s", attr.Value) } + amount = coinAmount.Uint64() } } + return ch.handleInitiateWithdrawal(l2Sequence, from, to, baseDenom, amount) } @@ -59,14 +60,19 @@ func (ch *Child) handleInitiateWithdrawal(l2Sequence uint64, from string, to str BaseDenom: baseDenom, WithdrawalHash: withdrawalHash[:], } + + // store to database err := ch.SetWithdrawal(l2Sequence, data) if err != nil { return err } - err = ch.mk.InsertLeaf(withdrawalHash[:], false) + + // generate merkle tree + err = ch.mk.InsertLeaf(withdrawalHash[:]) if err != nil { return err } + ch.logger.Info("initiate token withdrawal", zap.Uint64("l2_sequence", l2Sequence), 
zap.String("from", from), @@ -75,35 +81,38 @@ func (ch *Child) handleInitiateWithdrawal(l2Sequence uint64, from string, to str zap.String("base_denom", baseDenom), zap.String("withdrawal", base64.StdEncoding.EncodeToString(withdrawalHash[:])), ) + return nil } func (ch *Child) prepareTree(blockHeight uint64) error { if blockHeight == 1 { - ch.mk.SetNewWorkingTree(1, 1) - return nil + return ch.mk.InitializeWorkingTree(1, 1) } err := ch.mk.LoadWorkingTree(blockHeight - 1) if err == dbtypes.ErrNotFound { - // must not happend + // must not happened // TOOD: if user want to start from a specific height, we need to provide a way to do so panic(fmt.Errorf("working tree not found at height: %d, current: %d", blockHeight-1, blockHeight)) } else if err != nil { return err } + return nil } func (ch *Child) prepareOutput() error { workingOutputIndex := ch.mk.GetWorkingTreeIndex() + // initialize next output time if ch.nextOutputTime.IsZero() && workingOutputIndex > 1 { output, err := ch.host.QueryOutput(workingOutputIndex - 1) if err != nil { - // TODO: maybe not panic here and roll back - panic(fmt.Errorf("output does not exist at index: %d", workingOutputIndex-1)) + // TODO: maybe not return error here and roll back + return fmt.Errorf("output does not exist at index: %d", workingOutputIndex-1) } + ch.nextOutputTime = output.OutputProposal.L1BlockTime.Add(ch.bridgeInfo.BridgeConfig.SubmissionInterval * 2 / 3) } @@ -120,35 +129,47 @@ func (ch *Child) prepareOutput() error { return nil } -func (ch *Child) handleTree(blockHeight uint64, latestHeight uint64, blockId []byte, blockHeader cmtproto.Header) (kvs []types.KV, storageRoot []byte, err error) { - // finalize working tree if we are syncing or block time is over next output time +func (ch *Child) handleTree(blockHeight uint64, latestHeight uint64, blockId []byte, blockHeader cmtproto.Header) (kvs []types.RawKV, storageRoot []byte, err error) { + // finalize working tree if we are fully synced or block time is over next output time if ch.finalizingBlockHeight == blockHeight || - (ch.finalizingBlockHeight == 0 && blockHeight == latestHeight && blockHeader.Time.After(ch.nextOutputTime)) { - extraData := executortypes.TreeExtraData{ + (ch.finalizingBlockHeight == 0 && + blockHeight == latestHeight && + blockHeader.Time.After(ch.nextOutputTime)) { + + data, err := json.Marshal(executortypes.TreeExtraData{ BlockNumber: blockHeight, BlockHash: blockId, - } - data, err := json.Marshal(extraData) + }) if err != nil { return nil, nil, err } + kvs, storageRoot, err = ch.mk.FinalizeWorkingTree(data) if err != nil { return nil, nil, err } - ch.logger.Info("finalize tree", zap.Uint64("tree_index", ch.mk.GetWorkingTreeIndex()), zap.Uint64("height", blockHeight), zap.Uint64("num_leaves", ch.mk.GetWorkingTreeLeafCount()), zap.String("storage_root", base64.StdEncoding.EncodeToString(storageRoot))) - // does not submit output since it already submitted + ch.logger.Info("finalize working tree", + zap.Uint64("tree_index", ch.mk.GetWorkingTreeIndex()), + zap.Uint64("height", blockHeight), + zap.Uint64("num_leaves", ch.mk.GetWorkingTreeLeafCount()), + zap.String("storage_root", base64.StdEncoding.EncodeToString(storageRoot)), + ) + + // skip output submission when it is already submitted if ch.finalizingBlockHeight == blockHeight { storageRoot = nil } + ch.finalizingBlockHeight = 0 ch.nextOutputTime = blockHeader.Time.Add(ch.bridgeInfo.BridgeConfig.SubmissionInterval * 2 / 3) } + err = ch.mk.SaveWorkingTree(blockHeight) if err != nil { return nil, nil, err } + return 
kvs, storageRoot, nil } @@ -174,6 +195,7 @@ func (ch *Child) handleOutput(blockHeight uint64, version uint8, blockId []byte, return nil } +// GetWithdrawal returns the withdrawal data for the given sequence from the database func (ch *Child) GetWithdrawal(sequence uint64) (executortypes.WithdrawalData, error) { dataBytes, err := ch.db.Get(executortypes.PrefixedWithdrawalKey(sequence)) if err != nil { @@ -184,6 +206,7 @@ func (ch *Child) GetWithdrawal(sequence uint64) (executortypes.WithdrawalData, e return data, err } +// SetWithdrawal store the withdrawal data for the given sequence to the database func (ch *Child) SetWithdrawal(sequence uint64, data executortypes.WithdrawalData) error { dataBytes, err := json.Marshal(&data) if err != nil { diff --git a/executor/executor.go b/executor/executor.go index c7493b2..0c394eb 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -22,6 +22,9 @@ import ( var _ bottypes.Bot = &Executor{} +// Executor charges the execution of the bridge between the host and the child chain +// - relay l1 deposit messages to l2 +// - generate l2 output root and submit to l1 type Executor struct { host *host.Host child *child.Child @@ -40,9 +43,17 @@ func NewExecutor(cfg *executortypes.Config, db types.DB, sv *server.Server, logg } executor := &Executor{ - host: host.NewHost(cfg.Version, cfg.HostNode, db.WithPrefix([]byte(executortypes.HostNodeName)), logger.Named(executortypes.HostNodeName), cdc, txConfig), - child: child.NewChild(cfg.Version, cfg.ChildNode, db.WithPrefix([]byte(executortypes.ChildNodeName)), logger.Named(executortypes.ChildNodeName), cdc, txConfig), - batch: batch.NewBatchSubmitter(cfg.Version, cfg.ChildNode, cfg.Batch, db.WithPrefix([]byte(executortypes.BatchNodeName)), logger.Named(executortypes.BatchNodeName), cdc, txConfig, homePath), + host: host.NewHost( + cfg.Version, cfg.RelayOracle, cfg.L1NodeConfig(), + db.WithPrefix([]byte(executortypes.HostNodeName)), + logger.Named(executortypes.HostNodeName), cdc, txConfig, + ), + child: child.NewChild( + cfg.Version, cfg.L2NodeConfig(), + db.WithPrefix([]byte(executortypes.ChildNodeName)), + logger.Named(executortypes.ChildNodeName), cdc, txConfig, + ), + batch: batch.NewBatchSubmitter(cfg.Version, cfg.DANodeConfig(), cfg.DANodeConfig(), db.WithPrefix([]byte(executortypes.BatchNodeName)), logger.Named(executortypes.BatchNodeName), cdc, txConfig, homePath), cfg: cfg, db: db, @@ -57,7 +68,12 @@ func NewExecutor(cfg *executortypes.Config, db types.DB, sv *server.Server, logg if bridgeInfo.BridgeId == 0 { panic("bridge info is not set") } - executor.logger.Info("bridge info", zap.Uint64("id", bridgeInfo.BridgeId), zap.Duration("submission_interval", bridgeInfo.BridgeConfig.SubmissionInterval)) + + executor.logger.Info( + "bridge info", + zap.Uint64("id", bridgeInfo.BridgeId), + zap.Duration("submission_interval", bridgeInfo.BridgeConfig.SubmissionInterval), + ) da := executor.host // if cfg.Batch.DANode.ChainID != cfg.HostNode.ChainID { @@ -95,16 +111,44 @@ func (ex *Executor) Start(cmdCtx context.Context) error { childCtx, childDone := context.WithCancel(cmdCtx) batchCtx, batchDone := context.WithCancel(cmdCtx) - ex.host.Start(hostCtx) - ex.child.Start(childCtx) - ex.batch.Start(batchCtx) + errCh := make(chan error, 3) + ex.host.Start(hostCtx, errCh) + ex.child.Start(childCtx, errCh) + ex.batch.Start(batchCtx, errCh) + + go func() { + err := ex.server.Start(ex.cfg.ListenAddress) + if err != nil { + errCh <- err + } + }() + + shutdown := func(err error) error { + ex.logger.Info("executor 
shutdown", zap.String("state", "requested")) + + ex.logger.Debug("executor shutdown", zap.String("state", "wait"), zap.String("target", "api")) + ex.server.Shutdown() + + ex.logger.Debug("executor shutdown", zap.String("state", "wait"), zap.String("target", "host")) + hostDone() + + ex.logger.Debug("executor shutdown", zap.String("state", "wait"), zap.String("target", "child")) + childDone() + + ex.logger.Debug("executor shutdown", zap.String("state", "wait"), zap.String("target", "batch")) + batchDone() - err = ex.server.Start() - // TODO: safely shut down - hostDone() - childDone() - batchDone() - return err + ex.logger.Info("executor shutdown completed") + return err + } + + select { + case err := <-errCh: + ex.logger.Error("executor error", zap.String("error", err.Error())) + return shutdown(err) + case <-cmdCtx.Done(): + return shutdown(nil) + } } func (ex *Executor) RegisterQuerier() { @@ -123,4 +167,15 @@ func (ex *Executor) RegisterQuerier() { } return c.JSON(res) }) + + ex.server.RegisterQuerier("/status", func(c *fiber.Ctx) error { + childHeight := ex.child.GetHeight() + hostHeight := ex.host.GetHeight() + res := map[string]uint64{ + "child": childHeight, + "host": hostHeight, + } + + return c.JSON(res) + }) } diff --git a/executor/host/handler.go b/executor/host/handler.go index ccf3367..04a1c76 100644 --- a/executor/host/handler.go +++ b/executor/host/handler.go @@ -26,8 +26,8 @@ func (h *Host) endBlockHandler(args nodetypes.EndBlockArgs) error { return nil } - batchKVs := []types.KV{ - h.node.RawKVSyncInfo(blockHeight), + batchKVs := []types.RawKV{ + h.node.SyncInfoToRawKV(blockHeight), } if h.node.HasKey() { if len(h.msgQueue) != 0 { @@ -38,7 +38,7 @@ func (h *Host) endBlockHandler(args nodetypes.EndBlockArgs) error { }) } - msgkvs, err := h.child.RawKVProcessedData(h.processedMsgs, false) + msgkvs, err := h.child.ProcessedMsgsToRawKV(h.processedMsgs, false) if err != nil { return err } @@ -61,16 +61,15 @@ func (h *Host) endBlockHandler(args nodetypes.EndBlockArgs) error { func (h *Host) txHandler(args nodetypes.TxHandlerArgs) error { if args.BlockHeight == args.LatestHeight && args.TxIndex == 0 { - msg, err := h.oracleTxHandler(args.BlockHeight, args.Tx) - if err != nil { + if msg, err := h.oracleTxHandler(args.BlockHeight, args.Tx); err != nil { return err + } else if msg != nil { + h.processedMsgs = append(h.processedMsgs, nodetypes.ProcessedMsgs{ + Msgs: []sdk.Msg{msg}, + Timestamp: time.Now().UnixNano(), + Save: false, + }) } - - h.processedMsgs = append(h.processedMsgs, nodetypes.ProcessedMsgs{ - Msgs: []sdk.Msg{msg}, - Timestamp: time.Now().UnixNano(), - Save: false, - }) } return nil } diff --git a/executor/host/host.go b/executor/host/host.go index 30b98e6..59f1f5c 100644 --- a/executor/host/host.go +++ b/executor/host/host.go @@ -2,19 +2,19 @@ package host import ( "context" + "fmt" + + "go.uber.org/zap" - "cosmossdk.io/core/address" ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" executortypes "github.com/initia-labs/opinit-bots-go/executor/types" + "github.com/initia-labs/opinit-bots-go/node" nodetypes "github.com/initia-labs/opinit-bots-go/node/types" "github.com/initia-labs/opinit-bots-go/types" - "go.uber.org/zap" - - "github.com/initia-labs/opinit-bots-go/node" + "cosmossdk.io/core/address" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" - sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -23,7 +23,7 @@ type childNode interface { AccountCodec() address.Codec HasKey() bool BroadcastMsgs(nodetypes.ProcessedMsgs) - 
RawKVProcessedData([]nodetypes.ProcessedMsgs, bool) ([]types.KV, error) + ProcessedMsgsToRawKV([]nodetypes.ProcessedMsgs, bool) ([]types.RawKV, error) QueryNextL1Sequence() (uint64, error) } @@ -34,7 +34,8 @@ type batchNode interface { var _ executortypes.DANode = &Host{} type Host struct { - version uint8 + version uint8 + relayOracle bool node *node.Node child childNode @@ -55,14 +56,18 @@ type Host struct { msgQueue []sdk.Msg } -func NewHost(version uint8, cfg nodetypes.NodeConfig, db types.DB, logger *zap.Logger, cdc codec.Codec, txConfig client.TxConfig) *Host { +func NewHost( + version uint8, relayOracle bool, cfg nodetypes.NodeConfig, + db types.DB, logger *zap.Logger, cdc codec.Codec, txConfig client.TxConfig, +) *Host { node, err := node.NewNode(cfg, db, logger, cdc, txConfig) if err != nil { panic(err) } h := &Host{ - version: version, + version: version, + relayOracle: relayOracle, node: node, @@ -93,11 +98,19 @@ func (h *Host) Initialize(child childNode, batch batchNode, bridgeId int64) (err } h.registerHandlers() + return nil } -func (h *Host) Start(ctx context.Context) { - h.node.Start(ctx, nodetypes.PROCESS_TYPE_DEFAULT) +func (h *Host) Start(ctx context.Context, errCh chan error) { + defer func() { + if r := recover(); r != nil { + h.logger.Error("host panic", zap.Any("recover", r)) + errCh <- fmt.Errorf("host panic: %v", r) + } + }() + + h.node.Start(ctx, errCh, nodetypes.PROCESS_TYPE_DEFAULT) } func (h *Host) registerHandlers() { @@ -114,11 +127,12 @@ func (h Host) BroadcastMsgs(msgs nodetypes.ProcessedMsgs) { if !h.node.HasKey() { return } + h.node.BroadcastMsgs(msgs) } -func (h Host) RawKVProcessedData(msgs []nodetypes.ProcessedMsgs, delete bool) ([]types.KV, error) { - return h.node.RawKVProcessedData(msgs, delete) +func (h Host) ProcessedMsgsToRawKV(msgs []nodetypes.ProcessedMsgs, delete bool) ([]types.RawKV, error) { + return h.node.ProcessedMsgsToRawKV(msgs, delete) } func (h *Host) SetBridgeId(brigeId int64) { @@ -132,3 +146,7 @@ func (h Host) AccountCodec() address.Codec { func (h Host) HasKey() bool { return h.node.HasKey() } + +func (ch Host) GetHeight() uint64 { + return ch.node.GetHeight() +} diff --git a/executor/host/oracle.go b/executor/host/oracle.go index e78f52b..f30f0dd 100644 --- a/executor/host/oracle.go +++ b/executor/host/oracle.go @@ -8,7 +8,13 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ) -func (h *Host) oracleTxHandler(blockHeight uint64, tx comettypes.Tx) (sdk.Msg, error) { +// If the relay oracle is enabled and the extended commit info contains votes, create a new MsgUpdateOracle message. +// Else return nil. 
+func (h *Host) oracleTxHandler(blockHeight uint64, extCommitBz comettypes.Tx) (sdk.Msg, error) { + if !h.relayOracle { + return nil, nil + } + sender, err := h.child.GetAddressStr() if err != nil { return nil, err @@ -17,8 +23,9 @@ func (h *Host) oracleTxHandler(blockHeight uint64, tx comettypes.Tx) (sdk.Msg, e msg := opchildtypes.NewMsgUpdateOracle( sender, blockHeight, - tx, + extCommitBz, ) + err = msg.Validate(h.child.AccountCodec()) if err != nil { return nil, err diff --git a/executor/host/query.go b/executor/host/query.go index 431fff1..2db8075 100644 --- a/executor/host/query.go +++ b/executor/host/query.go @@ -24,7 +24,9 @@ func (h Host) QueryLastOutput() (*ophosttypes.QueryOutputProposalResponse, error Reverse: true, }, } - ctx := node.GetQueryContext(0) + ctx, cancel := node.GetQueryContext(0) + defer cancel() + res, err := h.ophostQueryClient.OutputProposals(ctx, req) if err != nil { return nil, err @@ -40,7 +42,9 @@ func (h Host) QueryOutput(outputIndex uint64) (*ophosttypes.QueryOutputProposalR BridgeId: uint64(h.bridgeId), OutputIndex: outputIndex, } - ctx := node.GetQueryContext(0) + ctx, cancel := node.GetQueryContext(0) + defer cancel() + return h.ophostQueryClient.OutputProposal(ctx, req) } diff --git a/executor/types/config.go b/executor/types/config.go index fef5079..42af0e1 100644 --- a/executor/types/config.go +++ b/executor/types/config.go @@ -7,54 +7,127 @@ import ( ) type Config struct { - Version uint8 `json:"version"` - HostNode nodetypes.NodeConfig `json:"host_node"` - ChildNode nodetypes.NodeConfig `json:"child_node"` - Batch BatchConfig `json:"batch"` + // Version is the version used to build output root. + Version uint8 `json:"version"` + + // ListenAddress is the address to listen for incoming requests. + ListenAddress string `json:"listen_address"` + + L1RPCAddress string `json:"l1_rpc_address"` + L2RPCAddress string `json:"l2_rpc_address"` + DARPCAddress string `json:"da_rpc_address"` + + L1GasPrice string `json:"l1_gas_price"` + L2GasPrice string `json:"l2_gas_price"` + DAGasPrice string `json:"da_gas_price"` + + L1ChainID string `json:"l1_chain_id"` + L2ChainID string `json:"l2_chain_id"` + DAChainID string `json:"da_chain_id"` + + // OutputSubmitterMnemonic is the mnemonic phrase for the output submitter, + // which is used to relay the output transaction from l2 to l1. + // + // If you don't want to use the output submitter feature, you can leave it empty. + OutputSubmitterMnemonic string `json:"output_submitter_mnemonic"` + + // BridgeExecutorMnemonic is the mnemonic phrase for the bridge executor, + // which is used to relay initiate token bridge transaction from l1 to l2. + // + // If you don't want to use the bridge executor feature, you can leave it empty. + BridgeExecutorMnemonic string `json:"bridge_executor_mnemonic"` + + BatchSubmitterMnemonic string `json:"batch_submitter_mnemonic"` + + // RelayOracle is the flag to enable the oracle relay feature. 
+ RelayOracle bool `json:"relay_oracle"` + + MaxChunks int64 `json:"max_chunks"` + MaxChunkSize int64 `json:"max_chunk_size"` + MaxSubmissionTime int64 `json:"max_submission_time"` // seconds +} + +type HostConfig struct { + nodetypes.NodeConfig + RelayOracle bool `json:"relay_oracle"` } func DefaultConfig() *Config { return &Config{ - Version: 1, - HostNode: nodetypes.NodeConfig{ - RPC: "tcp://localhost:26657", - ChainID: "localhost", - }, - ChildNode: nodetypes.NodeConfig{ - RPC: "tcp://localhost:27657", - ChainID: "l2", - Mnemonic: "", - GasPrice: "0.15umin", - }, + Version: 1, + ListenAddress: "tcp://localhost:3000", + + L1RPCAddress: "tcp://localhost:26657", + L2RPCAddress: "tcp://localhost:27657", + + L1GasPrice: "0.15uinit", + L2GasPrice: "", + + L1ChainID: "testnet-l1-1", + L2ChainID: "testnet-l2-1", + + OutputSubmitterMnemonic: "", + BridgeExecutorMnemonic: "", } } func (cfg Config) Validate() error { - if cfg.HostNode.ChainID == "" { - return errors.New("L1 chain ID is required") + if cfg.Version == 0 { + return errors.New("version is required") } - if cfg.HostNode.RPC == "" { + if cfg.L1RPCAddress == "" { return errors.New("L1 RPC URL is required") } + if cfg.L2RPCAddress == "" { + return errors.New("L2 RPC URL is required") + } + if cfg.DARPCAddress == "" { + return errors.New("L2 RPC URL is required") + } - if cfg.ChildNode.ChainID == "" { + if cfg.L1ChainID == "" { + return errors.New("L1 chain ID is required") + } + if cfg.L2ChainID == "" { return errors.New("L2 chain ID is required") } - - if cfg.ChildNode.RPC == "" { + if cfg.DAChainID == "" { return errors.New("L2 RPC URL is required") } - - if cfg.Version == 0 { - return errors.New("Bridge ID is required") + if cfg.ListenAddress == "" { + return errors.New("listen address is required") } return nil } +func (cfg Config) L1NodeConfig() nodetypes.NodeConfig { + return nodetypes.NodeConfig{ + RPC: cfg.L1RPCAddress, + ChainID: cfg.L1ChainID, + Mnemonic: cfg.OutputSubmitterMnemonic, + } +} + +func (cfg Config) L2NodeConfig() nodetypes.NodeConfig { + return nodetypes.NodeConfig{ + RPC: cfg.L2RPCAddress, + ChainID: cfg.L2ChainID, + Mnemonic: cfg.BridgeExecutorMnemonic, + } +} + +func (cfg Config) DANodeConfig() nodetypes.NodeConfig { + return nodetypes.NodeConfig{ + RPC: cfg.DARPCAddress, + ChainID: cfg.DAChainID, + Mnemonic: cfg.BatchSubmitterMnemonic, + } +} + type BatchConfig struct { - DANode nodetypes.NodeConfig `json:"da_node"` - MaxChunks int64 `json:"max_chunks"` - MaxChunkSize int64 `json:"max_chunk_size"` - MaxSubmissionTime int64 `json:"max_submission_time"` // seconds + nodetypes.NodeConfig + MaxChunks int64 `json:"max_chunks"` + MaxChunkSize int64 `json:"max_chunk_size"` + MaxSubmissionTime int64 `json:"max_submission_time"` // seconds } diff --git a/executor_sample.json b/executor_sample.json deleted file mode 100644 index 2cdeaa4..0000000 --- a/executor_sample.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "version": 1, - "host_node": { - "rpc": "tcp://localhost:26657", - "chain_id": "localnet", - "mnemonic": "", - "gas_price": "0.15uinit" - }, - "child_node": { - "rpc": "tcp://localhost:27657", - "chain_id": "l2", - "mnemonic": "", - "gas_price": "0.15umin" - }, - "batch": { - "batch_node": { - "chain_id": "localnet" - }, - "max_batch_size": 300000 - } -} diff --git a/go.mod b/go.mod index 20038c5..b12ea32 100644 --- a/go.mod +++ b/go.mod @@ -3,12 +3,14 @@ module github.com/initia-labs/opinit-bots-go go 1.22.2 require ( - github.com/cometbft/cometbft v0.38.8 - github.com/cosmos/cosmos-sdk v0.50.7 - 
github.com/initia-labs/OPinit v0.3.3-0.20240701023931-9e96e2798ae1 - github.com/initia-labs/initia v0.3.3 + github.com/cometbft/cometbft v0.38.9 + github.com/cosmos/cosmos-sdk v0.50.8 + github.com/gofiber/fiber/v2 v2.52.5 + github.com/initia-labs/OPinit v0.4.0 + github.com/initia-labs/initia v0.4.0 github.com/spf13/cobra v1.8.0 github.com/spf13/viper v1.18.2 + gopkg.in/yaml.v2 v2.4.0 ) require ( @@ -28,11 +30,7 @@ require ( github.com/aws/aws-sdk-go v1.44.312 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bits-and-blooms/bitset v1.13.0 // indirect - github.com/bytedance/sonic v1.11.6 // indirect - github.com/bytedance/sonic/loader v0.1.1 // indirect github.com/chzyer/readline v1.5.1 // indirect - github.com/cloudwego/base64x v0.1.4 // indirect - github.com/cloudwego/iasm v0.2.0 // indirect github.com/cockroachdb/apd/v2 v2.0.2 // indirect github.com/cosmos/gogogateway v1.2.0 // indirect github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8 v8.0.2 // indirect @@ -40,14 +38,9 @@ require ( github.com/cosmos/ibc-go/v8 v8.2.1 // indirect github.com/cosmos/interchain-security/v5 v5.0.0-rc0 // indirect github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect - github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-playground/locales v0.14.1 // indirect - github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.20.0 // indirect - github.com/goccy/go-json v0.10.2 // indirect - github.com/gofiber/fiber/v2 v2.52.5 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect @@ -69,20 +62,15 @@ require ( github.com/huandu/skiplist v1.2.0 // indirect github.com/iancoleman/orderedmap v0.3.0 // indirect github.com/improbable-eng/grpc-web v0.15.0 // indirect - github.com/initia-labs/OPinit/api v0.3.1 // indirect - github.com/initia-labs/movevm v0.3.3 // indirect + github.com/initia-labs/OPinit/api v0.4.0 // indirect + github.com/initia-labs/movevm v0.3.4 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/cpuid/v2 v2.2.7 // indirect - github.com/leodido/go-urn v1.4.0 // indirect github.com/lib/pq v1.10.9 // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect github.com/minio/highwayhash v1.0.2 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect github.com/noble-assets/forwarding/v2 v2.0.0-20240514101621-172acc02aac6 // indirect github.com/oklog/run v1.1.0 // indirect github.com/rakyll/statik v0.1.7 // indirect @@ -90,7 +78,6 @@ require ( github.com/rs/cors v1.8.3 // indirect github.com/skip-mev/block-sdk/v2 v2.1.2 // indirect github.com/skip-mev/slinky v0.4.3 // indirect - github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.12 // indirect github.com/ulikunitz/xz v0.5.11 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect @@ -102,12 +89,10 @@ require ( go.opentelemetry.io/otel v1.22.0 // indirect go.opentelemetry.io/otel/metric v1.22.0 // indirect 
go.opentelemetry.io/otel/trace v1.22.0 // indirect - golang.org/x/arch v0.8.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.162.0 // indirect google.golang.org/appengine v1.6.8 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect nhooyr.io/websocket v1.8.6 // indirect ) @@ -210,7 +195,7 @@ require ( github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/testify v1.9.0 // indirect + github.com/stretchr/testify v1.9.0 github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d github.com/tendermint/go-amino v0.16.0 // indirect @@ -220,7 +205,7 @@ require ( go.etcd.io/bbolt v1.3.8 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.24.0 // indirect + golang.org/x/crypto v0.24.0 golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0 // indirect golang.org/x/net v0.26.0 // indirect golang.org/x/sync v0.7.0 // indirect @@ -239,9 +224,4 @@ require ( sigs.k8s.io/yaml v1.4.0 // indirect ) -replace ( - github.com/cosmos/cosmos-sdk => github.com/initia-labs/cosmos-sdk v0.0.0-20240617110036-2411fa5fd7f8 - github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 - - github.com/initia-labs/OPinit => ../opinit -) +replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 diff --git a/go.sum b/go.sum index 78e97df..35fbd29 100644 --- a/go.sum +++ b/go.sum @@ -287,10 +287,6 @@ github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= -github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= -github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= -github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= -github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -317,10 +313,6 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= -github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= -github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= -github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -348,8 +340,8 @@ github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/cometbft/cometbft v0.38.8 h1:XyJ9Cu3xqap6xtNxiemrO8roXZ+KS2Zlu7qQ0w1trvU= -github.com/cometbft/cometbft v0.38.8/go.mod h1:xOoGZrtUT+A5izWfHSJgl0gYZUE7lu7Z2XIS1vWG/QQ= +github.com/cometbft/cometbft v0.38.9 h1:cJBJBG0mPKz+sqelCi/hlfZjadZQGdDNnu6YQ1ZsUHQ= +github.com/cometbft/cometbft v0.38.9/go.mod h1:xOoGZrtUT+A5izWfHSJgl0gYZUE7lu7Z2XIS1vWG/QQ= github.com/cometbft/cometbft-db v0.10.0 h1:VMBQh88zXn64jXVvj39tlu/IgsGR84T7ImjS523DCiU= github.com/cometbft/cometbft-db v0.10.0/go.mod h1:7RR7NRv99j7keWJ5IkE9iZibUTKYdtepXTp7Ra0FxKk= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= @@ -366,6 +358,8 @@ github.com/cosmos/cosmos-db v1.0.2 h1:hwMjozuY1OlJs/uh6vddqnk9j7VamLv+0DBlbEXbAK github.com/cosmos/cosmos-db v1.0.2/go.mod h1:Z8IXcFJ9PqKK6BIsVOB3QXtkKoqUOp1vRvPT39kOXEA= github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= +github.com/cosmos/cosmos-sdk v0.50.8 h1:2UJHssUaGHTl4/dFp8xyREKAnfiRU6VVfqtKG9n8w5g= +github.com/cosmos/cosmos-sdk v0.50.8/go.mod h1:Zb+DgHtiByNwgj71IlJBXwOq6dLhtyAq3AgqpXm/jHo= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= @@ -469,8 +463,6 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= -github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= -github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -496,18 +488,12 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= -github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= 
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= -github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= -github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= -github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8= github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= @@ -741,14 +727,14 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/initia-labs/OPinit/api v0.3.1 h1:qCCtRZE9yw53FkBxlsOMBozMRGwz2EZVvNoDnZwfz04= -github.com/initia-labs/OPinit/api v0.3.1/go.mod h1:Xy/Nt3ubXLQ4zKn0m7RuQOM1sj8TVdlNNyek21TGYR0= -github.com/initia-labs/cosmos-sdk v0.0.0-20240617110036-2411fa5fd7f8 h1:+LGONkDS+HUB7TOTUOMf2mD4Oj+HMu3f2ADyYLNfeGc= -github.com/initia-labs/cosmos-sdk v0.0.0-20240617110036-2411fa5fd7f8/go.mod h1:lVkRY6cdMJ0fG3gp8y4hFrsKZqF4z7y0M2UXFb9Yt40= -github.com/initia-labs/initia v0.3.3 h1:82ZkXki6CG+F+rPDBVpTzzSQY8NalXIZ0LnYSWNd+3U= -github.com/initia-labs/initia v0.3.3/go.mod h1:1yWifo9GnhIvwDtCTTN6kb2mfq2On+oel6ha/rBXaQQ= -github.com/initia-labs/movevm v0.3.3 h1:xUH5VvjBSfJP4jg3axefmBlcdZ/7qfVYnUU09R4oN4g= -github.com/initia-labs/movevm v0.3.3/go.mod h1:6MxR4GP5zH3JUc1IMgfqAe1e483mZVS7fshPknZPJ30= +github.com/initia-labs/OPinit v0.4.0 h1:ICfZRbJ+KqgWt9z8seMl+6sRKbm40ELFvruvWh/KD40= +github.com/initia-labs/OPinit v0.4.0/go.mod h1:qu3h66dk3EF3Ehnr0bRuq0ezxzVKtQvy7G/waQFG9nM= +github.com/initia-labs/OPinit/api v0.4.0 h1:i3Cz5qPFpezeYh2R9TW2riPBb5tHwLOWOx3hrcWrM+o= +github.com/initia-labs/OPinit/api v0.4.0/go.mod h1:Xy/Nt3ubXLQ4zKn0m7RuQOM1sj8TVdlNNyek21TGYR0= +github.com/initia-labs/initia v0.4.0 h1:iJeFK5uaHF90YFyKoPztUH2hem5eKDpz42kg2dqJYB8= +github.com/initia-labs/initia v0.4.0/go.mod h1:dWCjY44C38bLlyTPXdXxzdXLwh3X1bAsiARJ0jfVqDg= +github.com/initia-labs/movevm v0.3.4 h1:kzqs6uzTq0f5peZJNzLq/1qgnmAFfC+I9eCyPBducxM= +github.com/initia-labs/movevm v0.3.4/go.mod h1:6MxR4GP5zH3JUc1IMgfqAe1e483mZVS7fshPknZPJ30= github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -780,10 +766,6 @@ github.com/klauspost/compress v1.12.3/go.mod 
h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8 github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= -github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -795,8 +777,6 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= -github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= @@ -1074,14 +1054,9 @@ github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= -github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= -github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -1155,9 +1130,6 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/arch 
v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= -golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1410,7 +1382,6 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= @@ -1791,11 +1762,9 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/main.go b/main.go deleted file mode 100644 index 61fb98e..0000000 --- a/main.go +++ /dev/null @@ -1,22 +0,0 @@ -package main - -import ( - "context" - "os" - - "github.com/initia-labs/opinit-bots-go/cmd" -) - -// TODO: use cmd package to build and run the bot -// just test the bot with this main function - -func main() { - rootCmd := cmd.NewRootCmd() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - if err := rootCmd.ExecuteContext(ctx); err != nil { - os.Exit(1) - } -} diff --git a/merkle/merkle.go b/merkle/merkle.go index e63cadf..ac7b197 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -1,50 +1,93 @@ package merkle import ( + "crypto/rand" "encoding/json" "errors" + "fmt" "math/bits" merkletypes "github.com/initia-labs/opinit-bots-go/merkle/types" types "github.com/initia-labs/opinit-bots-go/types" ) +// NodeGeneratorFn is a function type that generates parent node from two child nodes. +// +// CONTRACT: It should generate return same result for same inputs even the order of inputs are swapped. +type NodeGeneratorFn func([]byte, []byte) [32]byte + +// Merkle is a struct that manages the merkle tree which only holds the last sibling +// of each level(height) to minimize the memory usage. 
type Merkle struct { - db types.DB - workingTree merkletypes.TreeInfo + db types.DB + workingTree *merkletypes.TreeInfo + nodeGeneratorFn NodeGeneratorFn +} + +// Check if the node generator function is commutative +func validateNodeGeneratorFn(fn NodeGeneratorFn) error { + randInput1 := make([]byte, 32) + randInput2 := make([]byte, 32) + rand.Read(randInput1) + rand.Read(randInput2) + + node1 := fn(randInput1, randInput2) + node2 := fn(randInput2, randInput1) + + if node1 != node2 { + return errors.New("node generator function is not commutative") + } - nodeGeneratorFn func([]byte, []byte) [32]byte + return nil } -func NewMerkle(db types.DB, nodeGeneratorFn func([]byte, []byte) [32]byte) *Merkle { +func NewMerkle(db types.DB, nodeGeneratorFn NodeGeneratorFn) (*Merkle, error) { + err := validateNodeGeneratorFn(nodeGeneratorFn) + if err != nil { + return nil, err + } + return &Merkle{ db: db, nodeGeneratorFn: nodeGeneratorFn, - } + }, nil } -func (m *Merkle) SetNewWorkingTree(treeIndex uint64, startLeafIndex uint64) { - m.workingTree = merkletypes.TreeInfo{ +// InitializeWorkingTree resets the working tree with the given tree index and start leaf index. +func (m *Merkle) InitializeWorkingTree(treeIndex uint64, startLeafIndex uint64) error { + if m.workingTree != nil && !m.workingTree.Done { + return fmt.Errorf("failed to initialize working tree (`%d`); working tree is not finalized", treeIndex) + } + + if treeIndex < 1 || startLeafIndex < 1 { + return fmt.Errorf("failed to initialize working tree (`%d`, `%d`); invalid index", treeIndex, startLeafIndex) + } + + m.workingTree = &merkletypes.TreeInfo{ Index: treeIndex, StartLeafIndex: startLeafIndex, LeafCount: 0, - HeightData: make(map[uint8][]byte), + LastSiblings: make(map[uint8][]byte), Done: false, } + + return nil } -func (m *Merkle) FinalizeWorkingTree(extraData []byte) ([]types.KV, []byte, error) { +// FinalizeWorkingTree finalizes the working tree and returns the finalized tree info. +func (m *Merkle) FinalizeWorkingTree(extraData []byte) ([]types.RawKV, []byte /* root */, error) { m.workingTree.Done = true if m.workingTree.LeafCount == 0 { return nil, merkletypes.EmptyRootHash[:], nil } - kvs, err := m.fillRestLeaves() + + err := m.fillLeaves() if err != nil { return nil, nil, err } - treeRootHash := m.workingTree.HeightData[m.Height()] - tree := merkletypes.FinalizedTreeInfo{ + treeRootHash := m.workingTree.LastSiblings[m.Height()] + finalizedTreeInfo := merkletypes.FinalizedTreeInfo{ TreeIndex: m.workingTree.Index, TreeHeight: m.Height(), Root: treeRootHash, @@ -53,19 +96,24 @@ func (m *Merkle) FinalizeWorkingTree(extraData []byte) ([]types.KV, []byte, erro ExtraData: extraData, } - data, err := json.Marshal(tree) + data, err := json.Marshal(finalizedTreeInfo) if err != nil { return nil, nil, err } - kvs = append(kvs, types.KV{ - Key: m.db.PrefixedKey(merkletypes.PrefixedFinalizedTreeKey(tree.StartLeafIndex)), + // Save the finalized tree info with the start leaf index as the key, + // when we need to get the proofs for the leaf, we can get the tree info with the start leaf index. + kvs := []types.RawKV{{ + Key: m.db.PrefixedKey(finalizedTreeInfo.Key()), Value: data, - }) + }} return kvs, treeRootHash, err } +// LoadWorkingTree loads the working tree from the database. +// +// It is used to load the working tree to handle the case where the bot is stopped. 
func (m *Merkle) LoadWorkingTree(version uint64) error { data, err := m.db.Get(merkletypes.PrefixedWorkingTreeKey(version)) if err != nil { @@ -77,13 +125,18 @@ func (m *Merkle) LoadWorkingTree(version uint64) error { if err != nil { return err } else if workingTree.Done { - m.SetNewWorkingTree(workingTree.Index+1, workingTree.StartLeafIndex+workingTree.LeafCount) - return nil + nextTreeIndex := workingTree.Index + 1 + nextStartLeafIndex := workingTree.StartLeafIndex + workingTree.LeafCount + return m.InitializeWorkingTree(nextTreeIndex, nextStartLeafIndex) } - m.workingTree = workingTree + + m.workingTree = &workingTree return nil } +// SaveWorkingTree saves the working tree to the database. +// +// It is used to save the working tree to handle the case where the bot is stopped. func (m *Merkle) SaveWorkingTree(version uint64) error { data, err := json.Marshal(&m.workingTree) if err != nil { @@ -92,107 +145,120 @@ func (m *Merkle) SaveWorkingTree(version uint64) error { return m.db.Set(merkletypes.PrefixedWorkingTreeKey(version), data) } -func (m *Merkle) GetKVWorkingTree() (types.KV, error) { - data, err := json.Marshal(&m.workingTree) - if err != nil { - return types.KV{}, err - } - return types.KV{ - Key: merkletypes.WorkingTreeKey, - Value: data, - }, nil -} - +// Height returns the height of the working tree. func (m *Merkle) Height() uint8 { if m.workingTree.LeafCount <= 1 { return uint8(m.workingTree.LeafCount) } + return uint8(bits.Len64(m.workingTree.LeafCount - 1)) } +// GetWorkingTreeIndex returns the index of the working tree. func (m *Merkle) GetWorkingTreeIndex() uint64 { return m.workingTree.Index } +// GetWorkingTreeLeafCount returns the leaf count of the working tree. func (m *Merkle) GetWorkingTreeLeafCount() uint64 { return m.workingTree.LeafCount } -func (m *Merkle) saveNode(height uint8, heightIndex uint64, data []byte) error { - return m.db.Set(merkletypes.PrefixedNodeKey(m.GetWorkingTreeIndex(), height, heightIndex), data) +func (m *Merkle) saveNode(height uint8, localNodeIndex uint64, data []byte) error { + return m.db.Set(merkletypes.PrefixedNodeKey(m.GetWorkingTreeIndex(), height, localNodeIndex), data) } -func (m *Merkle) getNode(treeIndex uint64, height uint8, heightIndex uint64) ([]byte, error) { - return m.db.Get(merkletypes.PrefixedNodeKey(treeIndex, height, heightIndex)) +func (m *Merkle) getNode(treeIndex uint64, height uint8, localNodeIndex uint64) ([]byte, error) { + return m.db.Get(merkletypes.PrefixedNodeKey(treeIndex, height, localNodeIndex)) } -func (m *Merkle) fillRestLeaves() ([]types.KV, error) { - kvs := make([]types.KV, 0) - leaf := m.workingTree.HeightData[0] - +// fillLeaves fills the rest of the leaves with the last leaf. +func (m *Merkle) fillLeaves() error { numRestLeaves := 1<<(m.Height()) - m.workingTree.LeafCount + if numRestLeaves == 0 { + return nil + } + + lastLeaf := m.workingTree.LastSiblings[0] + //nolint:typecheck for range numRestLeaves { - err := m.InsertLeaf(leaf, true) - if err != nil { - return nil, err + if err := m.InsertLeaf(lastLeaf); err != nil { + return err } } - return kvs, nil + + // leaf count increased with dummy values during the fill + // process, so decrease it back to keep l2 withdrawal sequence mapping. + m.workingTree.LeafCount -= numRestLeaves + + return nil } -func (m *Merkle) InsertLeaf(data []byte, residue bool) error { +// InsertLeaf inserts a leaf to the working tree. +// +// It updates the last sibling of each level until the root. 
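The commutativity contract above is also what makes proof verification position-free: a verifier can fold the sibling path returned by GetProofs (further below) into the root without tracking whether each sibling sat on the left or the right. A minimal sketch, not part of this patch, assuming the same commutative ophosttypes.GenerateNodeHash used in the tests; verifyMerkleProof is a hypothetical helper, not an API of this repository:

package main

import (
	"bytes"
	"fmt"

	ophosttypes "github.com/initia-labs/OPinit/x/ophost/types"
)

// verifyMerkleProof recomputes the root from a leaf and its sibling path.
// Because the node hash is commutative, the fold needs no left/right bookkeeping.
func verifyMerkleProof(leaf []byte, proofs [][]byte, root []byte) bool {
	node := leaf
	for _, sibling := range proofs {
		h := ophosttypes.GenerateNodeHash(node, sibling)
		node = h[:]
	}
	return bytes.Equal(node, root)
}

func main() {
	// With the 6-leaf tree from the tests, the proof for "node1" is
	// [node2, hash34, hash5666] and folding it reproduces the root.
	hashFn := ophosttypes.GenerateNodeHash
	hash12 := hashFn([]byte("node1"), []byte("node2"))
	hash34 := hashFn([]byte("node3"), []byte("node4"))
	hash56 := hashFn([]byte("node5"), []byte("node6"))
	hash66 := hashFn([]byte("node6"), []byte("node6"))
	hash1234 := hashFn(hash12[:], hash34[:])
	hash5666 := hashFn(hash56[:], hash66[:])
	root := hashFn(hash1234[:], hash5666[:])

	proofs := [][]byte{[]byte("node2"), hash34[:], hash5666[:]}
	fmt.Println(verifyMerkleProof([]byte("node1"), proofs, root[:])) // true
}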
+func (m *Merkle) InsertLeaf(data []byte) error { height := uint8(0) - heightIndex := m.workingTree.LeafCount + localNodeIndex := m.workingTree.LeafCount for { - err := m.saveNode(height, heightIndex, data) + // save the node with the given level and localLeafIndex + err := m.saveNode(height, localNodeIndex, data) if err != nil { return err } - sibling := m.workingTree.HeightData[height] - m.workingTree.HeightData[height] = data - if heightIndex%2 == 0 { + + sibling := m.workingTree.LastSiblings[height] + m.workingTree.LastSiblings[height] = data + if localNodeIndex%2 == 0 { break } + + // if localLeafIndex is odd, calculate parent node nodeHash := m.nodeGeneratorFn(sibling, data) data = nodeHash[:] - heightIndex = heightIndex / 2 + localNodeIndex = localNodeIndex / 2 height++ } - if !residue { - m.workingTree.LeafCount++ - } + m.workingTree.LeafCount++ + return nil } -func (m *Merkle) GetProofs(leafIndex uint64) ([][]byte, uint64, []byte, []byte, error) { +// GetProofs returns the proofs for the leaf with the given index. +func (m *Merkle) GetProofs(leafIndex uint64) (proofs [][]byte, treeIndex uint64, rootData []byte, extraData []byte, err error) { _, value, err := m.db.SeekPrevInclusiveKey(merkletypes.FinalizedTreeKey, merkletypes.PrefixedFinalizedTreeKey(leafIndex)) if err != nil { return nil, 0, nil, nil, err } var treeInfo merkletypes.FinalizedTreeInfo - err = json.Unmarshal(value, &treeInfo) - if err != nil { + if err := json.Unmarshal(value, &treeInfo); err != nil { return nil, 0, nil, nil, err } - proofs := make([][]byte, 0) - height := uint8(0) + // Check if the leaf index is in the tree if leafIndex < treeInfo.StartLeafIndex || leafIndex-treeInfo.StartLeafIndex >= treeInfo.LeafCount { - return nil, 0, nil, nil, errors.New("not found") + return nil, 0, nil, nil, fmt.Errorf("leaf (`%d`) is not found in tree (`%d`)", leafIndex, treeInfo.TreeIndex) } - localIndex := leafIndex - treeInfo.StartLeafIndex + height := uint8(0) + localNodeIndex := leafIndex - treeInfo.StartLeafIndex for height < treeInfo.TreeHeight { - sibling, err := m.getNode(treeInfo.TreeIndex, height, localIndex^1) + siblingIndex := localNodeIndex ^ 1 // flip the last bit to find the sibling + sibling, err := m.getNode(treeInfo.TreeIndex, height, siblingIndex) if err != nil { return nil, 0, nil, nil, err } + + // append the sibling to the proofs proofs = append(proofs, sibling) + + // update iteration variables height++ - localIndex = localIndex / 2 + localNodeIndex = localNodeIndex / 2 } + return proofs, treeInfo.TreeIndex, treeInfo.Root, treeInfo.ExtraData, nil } diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go new file mode 100644 index 0000000..9d5ca7d --- /dev/null +++ b/merkle/merkle_test.go @@ -0,0 +1,177 @@ +package merkle + +import ( + "encoding/json" + "testing" + + "golang.org/x/crypto/sha3" + + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots-go/db" + merkletypes "github.com/initia-labs/opinit-bots-go/merkle/types" + "github.com/stretchr/testify/require" +) + +func Test_validateNodeGeneratorFn(t *testing.T) { + fnNonCommutable := func(a, b []byte) [32]byte { + return sha3.Sum256(append(a, b...)) + } + + fnCommutable := ophosttypes.GenerateNodeHash + require.Error(t, validateNodeGeneratorFn(fnNonCommutable)) + require.NoError(t, validateNodeGeneratorFn(fnCommutable)) +} + +func Test_MerkleTree_LastSibling(t *testing.T) { + tempDir := t.TempDir() + db, err := db.NewDB(tempDir) + require.NoError(t, err) + + hashFn := ophosttypes.GenerateNodeHash + m, 
err := NewMerkle(db, hashFn) + require.NoError(t, err) + + require.NoError(t, m.InitializeWorkingTree(1, 1)) + + // empty tree + require.Len(t, m.workingTree.LastSiblings, 0) + + // 1 node + require.NoError(t, m.InsertLeaf([]byte("node1"))) + require.Len(t, m.workingTree.LastSiblings, 1) + require.Equal(t, []byte("node1"), m.workingTree.LastSiblings[0]) + + // 2 nodes + hash12 := hashFn([]byte("node1"), []byte("node2")) + require.NoError(t, m.InsertLeaf([]byte("node2"))) + require.Len(t, m.workingTree.LastSiblings, 2) + require.Equal(t, []byte("node2"), m.workingTree.LastSiblings[0]) + require.Equal(t, hash12[:], m.workingTree.LastSiblings[1]) + + // 3 nodes + require.NoError(t, m.InsertLeaf([]byte("node3"))) + require.Len(t, m.workingTree.LastSiblings, 2) + require.Equal(t, []byte("node3"), m.workingTree.LastSiblings[0]) + require.Equal(t, hash12[:], m.workingTree.LastSiblings[1]) + + // 4 nodes + hash34 := hashFn([]byte("node3"), []byte("node4")) + hash1234 := hashFn(hash12[:], hash34[:]) + require.NoError(t, m.InsertLeaf([]byte("node4"))) + require.Len(t, m.workingTree.LastSiblings, 3) + require.Equal(t, []byte("node4"), m.workingTree.LastSiblings[0]) + require.Equal(t, hash34[:], m.workingTree.LastSiblings[1]) + require.Equal(t, hash1234[:], m.workingTree.LastSiblings[2]) + + // 5 nodes + require.NoError(t, m.InsertLeaf([]byte("node5"))) + require.Len(t, m.workingTree.LastSiblings, 3) + require.Equal(t, []byte("node5"), m.workingTree.LastSiblings[0]) + require.Equal(t, hash34[:], m.workingTree.LastSiblings[1]) + require.Equal(t, hash1234[:], m.workingTree.LastSiblings[2]) + + // 6 nodes + hash56 := hashFn([]byte("node5"), []byte("node6")) + require.NoError(t, m.InsertLeaf([]byte("node6"))) + require.Len(t, m.workingTree.LastSiblings, 3) + require.Equal(t, []byte("node6"), m.workingTree.LastSiblings[0]) + require.Equal(t, hash56[:], m.workingTree.LastSiblings[1]) + require.Equal(t, hash1234[:], m.workingTree.LastSiblings[2]) +} + +func Test_FinalizeWorkingTree(t *testing.T) { + tempDir := t.TempDir() + db, err := db.NewDB(tempDir) + require.NoError(t, err) + + hashFn := ophosttypes.GenerateNodeHash + m, err := NewMerkle(db, hashFn) + require.NoError(t, err) + + require.NoError(t, m.InitializeWorkingTree(1, 1)) + + // empty tree + kvs, root, err := m.FinalizeWorkingTree(nil) + require.NoError(t, err) + require.Len(t, kvs, 0) + require.Equal(t, merkletypes.EmptyRootHash[:], root) + + // insert 6 nodes + require.NoError(t, m.InsertLeaf([]byte("node1"))) + require.NoError(t, m.InsertLeaf([]byte("node2"))) + require.NoError(t, m.InsertLeaf([]byte("node3"))) + require.NoError(t, m.InsertLeaf([]byte("node4"))) + require.NoError(t, m.InsertLeaf([]byte("node5"))) + require.NoError(t, m.InsertLeaf([]byte("node6"))) + + hash12 := hashFn([]byte("node1"), []byte("node2")) + hash34 := hashFn([]byte("node3"), []byte("node4")) + hash56 := hashFn([]byte("node5"), []byte("node6")) + hash66 := hashFn([]byte("node6"), []byte("node6")) + hash1234 := hashFn(hash12[:], hash34[:]) + hash5666 := hashFn(hash56[:], hash66[:]) + hashRoot := hashFn(hash1234[:], hash5666[:]) + + extraData := []byte("extra data") + kvs, root, err = m.FinalizeWorkingTree(extraData) + require.NoError(t, err) + require.Equal(t, hashRoot[:], root) + require.Len(t, kvs, 1) + + var info merkletypes.FinalizedTreeInfo + require.NoError(t, json.Unmarshal(kvs[0].Value, &info)) + require.Equal(t, merkletypes.FinalizedTreeInfo{ + TreeIndex: 1, + TreeHeight: 3, + Root: hashRoot[:], + StartLeafIndex: 1, + LeafCount: 6, + ExtraData: extraData, + }, 
info) +} + +func Test_GetProofs(t *testing.T) { + tempDir := t.TempDir() + db, err := db.NewDB(tempDir) + require.NoError(t, err) + + hashFn := ophosttypes.GenerateNodeHash + m, err := NewMerkle(db, hashFn) + require.NoError(t, err) + + require.NoError(t, m.InitializeWorkingTree(1, 1)) + + // insert 6 nodes + require.NoError(t, m.InsertLeaf([]byte("node1"))) + require.NoError(t, m.InsertLeaf([]byte("node2"))) + require.NoError(t, m.InsertLeaf([]byte("node3"))) + require.NoError(t, m.InsertLeaf([]byte("node4"))) + require.NoError(t, m.InsertLeaf([]byte("node5"))) + require.NoError(t, m.InsertLeaf([]byte("node6"))) + + hash12 := hashFn([]byte("node1"), []byte("node2")) + hash34 := hashFn([]byte("node3"), []byte("node4")) + hash56 := hashFn([]byte("node5"), []byte("node6")) + hash66 := hashFn([]byte("node6"), []byte("node6")) + hash1234 := hashFn(hash12[:], hash34[:]) + hash5666 := hashFn(hash56[:], hash66[:]) + hashRoot := hashFn(hash1234[:], hash5666[:]) + + extraData := []byte("extra data") + kvs, root, err := m.FinalizeWorkingTree(extraData) + require.NoError(t, err) + require.Equal(t, hashRoot[:], root) + + // store batch kvs to db + require.NoError(t, db.RawBatchSet(kvs...)) + + proofs, treeIndex, root_, extraData, err := m.GetProofs(1) + require.NoError(t, err) + require.Equal(t, uint64(1), treeIndex) + require.Equal(t, root, root_) + require.Equal(t, []byte("extra data"), extraData) + require.Len(t, proofs, 3) + require.Equal(t, []byte("node2"), proofs[0]) + require.Equal(t, hash34[:], proofs[1]) + require.Equal(t, hash5666[:], proofs[2]) +} diff --git a/merkle/types/db.go b/merkle/types/db.go index a27fd47..6d6e14d 100644 --- a/merkle/types/db.go +++ b/merkle/types/db.go @@ -1,19 +1,35 @@ package types type TreeInfo struct { - Index uint64 `json:"index"` - LeafCount uint64 `json:"leaf_count"` - StartLeafIndex uint64 `json:"start_leaf_index"` - HeightData map[uint8][]byte `json:"height_data"` - Done bool `json:"done"` + // Index of the tree used as prefix for the keys + Index uint64 `json:"index"` + + // Number of leaves in the tree + LeafCount uint64 `json:"leaf_count"` + + // Cumulative number of leaves all the way up to the current tree + StartLeafIndex uint64 `json:"start_leaf_index"` + + // Last sibling of the height(level) of the tree + LastSiblings map[uint8][]byte `json:"height_data"` + + // Flag to indicate if the tree is finalized + Done bool `json:"done"` } type FinalizedTreeInfo struct { + // TreeIndex is the index of the tree used as prefix for the keys, + // which is incremented by 1 for each new tree. TreeIndex uint64 `json:"tree_index"` TreeHeight uint8 `json:"tree_height"` Root []byte `json:"root"` - // used to identify the first leaf index of the tree + // StartLeafIndex is the cumulative number of leaves all the way up to the current tree. + // This approach helps to map the l2 withdrawal sequence to the tree index. 
StartLeafIndex uint64 `json:"start_leaf_index"` LeafCount uint64 `json:"leaf_count"` ExtraData []byte `json:"extra_data,omitempty"` } + +func (f FinalizedTreeInfo) Key() []byte { + return PrefixedFinalizedTreeKey(f.StartLeafIndex) +} diff --git a/merkle/types/key.go b/merkle/types/key.go index c00c0f8..23ca518 100644 --- a/merkle/types/key.go +++ b/merkle/types/key.go @@ -12,16 +12,16 @@ var ( NodeKey = []byte("node") ) -func GetNodeKey(treeIndex uint64, level uint8, levelIndex uint64) []byte { +func GetNodeKey(treeIndex uint64, height uint8, nodeIndex uint64) []byte { data := make([]byte, 17) binary.BigEndian.PutUint64(data, treeIndex) - data[8] = level - binary.BigEndian.PutUint64(data[9:], levelIndex) + data[8] = height + binary.BigEndian.PutUint64(data[9:], nodeIndex) return data } -func PrefixedNodeKey(treeIndex uint64, level uint8, levelIndex uint64) []byte { - return append(append(NodeKey, dbtypes.Splitter), GetNodeKey(treeIndex, level, levelIndex)...) +func PrefixedNodeKey(treeIndex uint64, height uint8, nodeIndex uint64) []byte { + return append(append(NodeKey, dbtypes.Splitter), GetNodeKey(treeIndex, height, nodeIndex)...) } func PrefixedFinalizedTreeKey(startLeafIndex uint64) []byte { diff --git a/node/account.go b/node/account.go index b8bda65..99e8e96 100644 --- a/node/account.go +++ b/node/account.go @@ -16,7 +16,7 @@ import ( var _ client.AccountRetriever = &Node{} func (n *Node) loadAccount() error { - account, _, err := n.GetAccountWithHeight(n.getClientCtx(), n.keyAddress) + account, err := n.GetAccount(n.getClientCtx(), n.keyAddress) if err != nil { return err } @@ -62,6 +62,7 @@ func (n *Node) GetAccountWithHeight(_ client.Context, addr sdk.AccAddress) (clie return nil, 0, fmt.Errorf("failed to parse block height: %w", err) } + //nolint:staticcheck var acc authtypes.AccountI if err := n.cdc.UnpackAny(res.Account, &acc); err != nil { return nil, 0, err diff --git a/node/db.go b/node/db.go index 6b5fd73..7382291 100644 --- a/node/db.go +++ b/node/db.go @@ -7,6 +7,7 @@ import ( "go.uber.org/zap" ) +////////////// +// SyncInfo // +////////////// + // should use safely func (n *Node) SetSyncInfo(height uint64) { n.lastProcessedBlockHeight = height @@ -14,10 +15,21 @@ func (n *Node) SetSyncInfo(height uint64) { func (n *Node) SaveSyncInfo(height uint64) error { return n.db.Set(nodetypes.LastProcessedBlockHeightKey, dbtypes.FromUint64(height)) } -func (n *Node) RawKVSyncInfo(height uint64) types.KV { - return types.KV{ +func (n *Node) SyncInfoToRawKV(height uint64) types.RawKV { + return types.RawKV{ Key: n.db.PrefixedKey(nodetypes.LastProcessedBlockHeightKey), Value: dbtypes.FromUint64(height), } @@ -30,11 +42,20 @@ func (n *Node) loadSyncInfo() error { } else if err != nil { return err } - n.lastProcessedBlockHeight = dbtypes.ToUint64(data) + + n.lastProcessedBlockHeight, err = dbtypes.ToUint64(data) + if err != nil { + return err + } + n.logger.Info("load sync info", zap.Uint64("last_processed_height", n.lastProcessedBlockHeight)) return nil } +/////////////// +// PendingTx // +/////////////// + func (n Node) savePendingTx(sequence uint64, txInfo nodetypes.PendingTxInfo) error { data, err := txInfo.Marshal() if err != nil { @@
-48,25 +69,27 @@ func (n Node) deletePendingTx(sequence uint64) error { } func (n *Node) loadPendingTxs() (txs []nodetypes.PendingTxInfo, err error) { - iterErr := n.db.PrefixedIterate(nodetypes.PendingTxsKey, func(_, value []byte) (stop bool) { + iterErr := n.db.PrefixedIterate(nodetypes.PendingTxsKey, func(_, value []byte) (stop bool, err error) { txInfo := nodetypes.PendingTxInfo{} err = txInfo.Unmarshal(value) if err != nil { - return true + return true, err } txs = append(txs, txInfo) - return false + return false, nil }) - if iterErr != nil { return nil, iterErr } + n.logger.Info("load pending txs", zap.Int("count", len(txs))) return txs, err } -func (n *Node) RawKVPendingTxs(txInfos []nodetypes.PendingTxInfo, delete bool) ([]types.KV, error) { - kvs := make([]types.KV, 0, len(txInfos)) +// PendingTxsToRawKV converts pending txs to raw kv pairs. +// If delete is true, it will return kv pairs for deletion (empty value). +func (n *Node) PendingTxsToRawKV(txInfos []nodetypes.PendingTxInfo, delete bool) ([]types.RawKV, error) { + kvs := make([]types.RawKV, 0, len(txInfos)) for _, txInfo := range txInfos { var data []byte var err error @@ -77,7 +100,7 @@ func (n *Node) RawKVPendingTxs(txInfos []nodetypes.PendingTxInfo, delete bool) ( return nil, err } } - kvs = append(kvs, types.KV{ + kvs = append(kvs, types.RawKV{ Key: n.db.PrefixedKey(nodetypes.PrefixedPendingTx(txInfo.Sequence)), Value: data, }) @@ -85,19 +108,51 @@ func (n *Node) RawKVPendingTxs(txInfos []nodetypes.PendingTxInfo, delete bool) ( return kvs, nil } -func (n *Node) RawKVProcessedData(processedData []nodetypes.ProcessedMsgs, delete bool) ([]types.KV, error) { - kvs := make([]types.KV, 0, len(processedData)) - for _, processedMsgs := range processedData { +/////////////////// +// ProcessedMsgs // +/////////////////// + +// ProcessedMsgsToRawKV converts processed data to raw kv pairs. +// If delete is true, it will return kv pairs for deletion (empty value). +func (n *Node) ProcessedMsgsToRawKV(ProcessedMsgs []nodetypes.ProcessedMsgs, delete bool) ([]types.RawKV, error) { + kvs := make([]types.RawKV, 0, len(ProcessedMsgs)) + for _, processedMsgs := range ProcessedMsgs { + if !processedMsgs.Save { + continue + } + var data []byte var err error - if !delete && processedMsgs.Save { - data, err = processedMsgs.Marshal() + if !delete { + data, err = processedMsgs.MarshalInterfaceJSON(n.cdc) if err != nil { return nil, err } } - kvs = append(kvs, types.KV{ + kvs = append(kvs, types.RawKV{ Key: n.db.PrefixedKey(nodetypes.PrefixedProcessedMsgs(uint64(processedMsgs.Timestamp))), Value: data, }) @@ -105,30 +160,31 @@ func (n *Node) RawKVProcessedData(processedData []nodetypes.ProcessedMsgs, delet return kvs, nil } -func (n *Node) saveProcessedMsgs(processedMsgs nodetypes.ProcessedMsgs) error { - data, err := processedMsgs.Marshal() - if err != nil { - return err - } - return n.db.Set(nodetypes.PrefixedProcessedMsgs(uint64(processedMsgs.Timestamp)), data) -} - -func (n *Node) loadProcessedData() (processedData []nodetypes.ProcessedMsgs, err error) { - iterErr := n.db.PrefixedIterate(nodetypes.ProcessedMsgsKey, func(_, value []byte) (stop bool) { - processedMsgs := nodetypes.ProcessedMsgs{} - err = processedMsgs.Unmarshal(value) +// currently no use case, but keep it for future use +// func (n *Node) saveProcessedMsgs(processedMsgs nodetypes.ProcessedMsgs) error { +// data, err := processedMsgs.Marshal() +// if err != nil { +// return err +// } +// return n.db.Set(nodetypes.PrefixedProcessedMsgs(uint64(processedMsgs.Timestamp)), data) +// } + +func (n *Node) loadProcessedMsgs() (ProcessedMsgs []nodetypes.ProcessedMsgs, err error) { + iterErr := n.db.PrefixedIterate(nodetypes.ProcessedMsgsKey, func(_, value []byte) (stop bool, err error) { + var processedMsgs nodetypes.ProcessedMsgs + err = processedMsgs.UnmarshalInterfaceJSON(n.cdc, value) if err != nil { - return true + return true, err } - processedData = append(processedData, processedMsgs) - return false + ProcessedMsgs = append(ProcessedMsgs, processedMsgs) + return false, nil }) if iterErr != nil { return nil, iterErr } - n.logger.Info("load pending processed msgs", zap.Int("count", len(processedData))) - return processedData, nil + n.logger.Info("load pending processed msgs", zap.Int("count", len(ProcessedMsgs))) + return ProcessedMsgs, nil } func (n *Node) deleteProcessedMsgs(timestamp int64) error { diff --git a/node/node.go b/node/node.go index 02ee22b..e0e4675 100644 --- a/node/node.go +++ b/node/node.go @@ -40,19 +40,22 @@ type Node struct { txf tx.Factory lastProcessedBlockHeight uint64 - pendingTxMu *sync.Mutex - pendingTxs []nodetypes.PendingTxInfo - pendingProcessedData []nodetypes.ProcessedMsgs + // local pending txs, which follow a queue (FIFO) data structure + pendingTxMu *sync.Mutex + pendingTxs []nodetypes.PendingTxInfo + + pendingProcessedMsgs []nodetypes.ProcessedMsgs txChannel chan nodetypes.ProcessedMsgs } func NewNode(cfg nodetypes.NodeConfig, db types.DB, logger *zap.Logger, cdc codec.Codec, txConfig client.TxConfig) (*Node, error) { - client, err := clienthttp.New(cfg.RPC, "/websocket") + client, err := client.NewClientFromNode(cfg.RPC) if err != nil { return nil, err } + // Use memory keyring for now // TODO: may use os keyring later keyBase, err := keyring.New(cfg.ChainID, "memory", "", nil, cdc) @@ -76,7 +79,7 @@ func NewNode(cfg
nodetypes.NodeConfig, db types.DB, logger *zap.Logger, cdc code pendingTxMu: &sync.Mutex{}, pendingTxs: make([]nodetypes.PendingTxInfo, 0), - pendingProcessedData: make([]nodetypes.ProcessedMsgs, 0), + pendingProcessedMsgs: make([]nodetypes.ProcessedMsgs, 0), txChannel: make(chan nodetypes.ProcessedMsgs), } @@ -100,27 +103,37 @@ func NewNode(cfg nodetypes.NodeConfig, db types.DB, logger *zap.Logger, cdc code return nil, err } } + return n, nil } -func (n Node) Start(ctx context.Context, processType nodetypes.BlockProcessType) { - go n.txBroadcastLooper(ctx) +func (n Node) Start(ctx context.Context, errCh chan error, processType nodetypes.BlockProcessType) { + go func() { + err := n.txBroadcastLooper(ctx) + if err != nil { + errCh <- err + } + }() // broadcast pending msgs first before executing block process looper - for _, processedMsg := range n.pendingProcessedData { + // @dev: this pending processed data is filled at initialization (`NewNode`). + for _, processedMsg := range n.pendingProcessedMsgs { n.BroadcastMsgs(processedMsg) } - go n.blockProcessLooper(ctx, processType) + + go func() { + err := n.blockProcessLooper(ctx, processType) + if err != nil { + errCh <- err + } + }() } func (n Node) HasKey() bool { - if n.cfg.Mnemonic == "" { - return false - } - return true + return n.cfg.Mnemonic != "" } -func (n *Node) prepareBroadcaster(lastBlockTime time.Time) error { +func (n *Node) prepareBroadcaster(_ /*lastBlockHeight*/ uint64, lastBlockTime time.Time) error { _, err := n.keyBase.NewAccount(nodetypes.KEY_NAME, n.cfg.Mnemonic, "", hd.CreateHDPath(sdk.GetConfig().GetCoinType(), 0, 0).String(), hd.Secp256k1) if err != nil { return err @@ -152,7 +165,7 @@ func (n *Node) prepareBroadcaster(lastBlockTime time.Time) error { return err } - dbBatchKVs := make([]types.KV, 0) + dbBatchKVs := make([]types.RawKV, 0) loadedPendingTxs, err := n.loadPendingTxs() if err != nil { @@ -163,66 +176,71 @@ func (n *Node) prepareBroadcaster(lastBlockTime time.Time) error { pendingTxTime := time.Unix(0, loadedPendingTxs[0].Timestamp) // if we have pending txs, wait until timeout - if lastBlockTime.Before(pendingTxTime.Add(nodetypes.TX_TIMEOUT)) { - timer := time.NewTimer(pendingTxTime.Add(nodetypes.TX_TIMEOUT).Sub(lastBlockTime)) + if timeoutTime := pendingTxTime.Add(nodetypes.TX_TIMEOUT); lastBlockTime.Before(timeoutTime) { + timer := time.NewTimer(timeoutTime.Sub(lastBlockTime)) <-timer.C } - // delete existing pending txs - pendingKVs, err := n.RawKVPendingTxs(loadedPendingTxs, true) + // convert pending txs to raw kv pairs for deletion + pendingKVs, err := n.PendingTxsToRawKV(loadedPendingTxs, true) if err != nil { return err } + + // add pending tx deletions to the db batch dbBatchKVs = append(dbBatchKVs, pendingKVs...)
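For context on the deletion batch just assembled: PendingTxsToRawKV's comment states that delete mode returns kv pairs with empty values, so the raw batch writer is expected to treat an empty value as "delete this key". The repository's RawBatchSet is not shown in this diff, so the following is only a rough sketch of that assumed convention against a cosmos-db style backend; applyRawBatch is a hypothetical helper, not this codebase's implementation:

package main

import (
	dbm "github.com/cosmos/cosmos-db"
)

// RawKV mirrors types.RawKV from this patch: a key-value pair stored without prefixing.
type RawKV struct {
	Key   []byte
	Value []byte
}

// applyRawBatch writes a batch, treating empty values as deletions (assumption).
func applyRawBatch(db dbm.DB, kvs ...RawKV) error {
	batch := db.NewBatch()
	defer batch.Close()

	for _, kv := range kvs {
		if len(kv.Value) == 0 {
			// empty value => delete the key
			if err := batch.Delete(kv.Key); err != nil {
				return err
			}
			continue
		}
		if err := batch.Set(kv.Key, kv.Value); err != nil {
			return err
		}
	}
	return batch.WriteSync()
}

func main() {
	db := dbm.NewMemDB()
	_ = applyRawBatch(db,
		RawKV{Key: []byte("pending_txs/1"), Value: []byte("tx-bytes")}, // write
		RawKV{Key: []byte("pending_txs/1")},                            // delete
	)
}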
// convert pending txs to pending msgs - for _, txInfo := range loadedPendingTxs { + for i, txInfo := range loadedPendingTxs { tx, err := n.DecodeTx(txInfo.Tx) if err != nil { return err } if txInfo.Save { - n.pendingProcessedData = append(n.pendingProcessedData, nodetypes.ProcessedMsgs{ + n.pendingProcessedMsgs = append(n.pendingProcessedMsgs, nodetypes.ProcessedMsgs{ Msgs: tx.GetMsgs(), Timestamp: time.Now().UnixNano(), Save: txInfo.Save, }) } - } - for i, pendingTx := range loadedPendingTxs { - n.logger.Debug("pending tx", zap.Int("index", i), zap.String("tx", pendingTx.String())) + n.logger.Debug("pending tx", zap.Int("index", i), zap.String("tx", txInfo.String())) } } - loadedProcessedData, err := n.loadProcessedData() + loadedProcessedMsgs, err := n.loadProcessedMsgs() if err != nil { return err } - kvProcessedData, err := n.RawKVProcessedData(loadedProcessedData, true) + + // need to remove processed msgs from db before updating the timestamp + // because the timestamp is used as a key. + kvProcessedMsgs, err := n.ProcessedMsgsToRawKV(loadedProcessedMsgs, true) if err != nil { return err } - dbBatchKVs = append(dbBatchKVs, kvProcessedData...) + dbBatchKVs = append(dbBatchKVs, kvProcessedMsgs...) - for i, pendingMsgs := range loadedProcessedData { - loadedProcessedData[i].Timestamp = time.Now().UnixNano() + // update timestamp of loaded processed msgs + for i, pendingMsgs := range loadedProcessedMsgs { + loadedProcessedMsgs[i].Timestamp = time.Now().UnixNano() n.logger.Debug("pending msgs", zap.Int("index", i), zap.String("msgs", pendingMsgs.String())) } - n.pendingProcessedData = append(n.pendingProcessedData, loadedProcessedData...) - - kvProcessedData, err = n.RawKVProcessedData(n.pendingProcessedData, false) + // save all pending msgs with updated timestamp to db + n.pendingProcessedMsgs = append(n.pendingProcessedMsgs, loadedProcessedMsgs...) + kvProcessedMsgs, err = n.ProcessedMsgsToRawKV(n.pendingProcessedMsgs, false) if err != nil { return err } - dbBatchKVs = append(dbBatchKVs, kvProcessedData...) + dbBatchKVs = append(dbBatchKVs, kvProcessedMsgs...) // save all pending msgs first, then broadcast them err = n.db.RawBatchSet(dbBatchKVs...) 
if err != nil { return err } + return nil } diff --git a/node/process.go b/node/process.go index 361b7f8..968fe4e 100644 --- a/node/process.go +++ b/node/process.go @@ -105,22 +105,26 @@ func (n *Node) handleNewBlock(block *rpccoretypes.ResultBlock, blockResult *rpcc // check pending txs first // TODO: may handle pending txs with same level of other handlers for _, tx := range block.Block.Txs { - if n.localPendingTxLength() == 0 { + if n.lenLocalPendingTx() == 0 { break - } else if pendingTx := n.getLocalPendingTx(); TxHash(tx) == pendingTx.TxHash { + } + + // check if the first pending tx is included in the block + if pendingTx := n.peekLocalPendingTx(); TxHash(tx) == pendingTx.TxHash { n.logger.Debug("tx inserted", zap.Int64("height", block.Block.Height), zap.Uint64("sequence", pendingTx.Sequence), zap.String("txHash", pendingTx.TxHash)) err := n.deletePendingTx(pendingTx.Sequence) if err != nil { return err } - n.deleteLocalPendingTx() + n.dequeueLocalPendingTx() } } - if length := n.localPendingTxLength(); length > 0 { + if length := n.lenLocalPendingTx(); length > 0 { n.logger.Debug("remaining pending txs", zap.Int64("height", block.Block.Height), zap.Int("count", length)) - pendingTxTime := time.Unix(0, n.getLocalPendingTx().Timestamp) + pendingTxTime := time.Unix(0, n.peekLocalPendingTx().Timestamp) if block.Block.Time.After(pendingTxTime.Add(nodetypes.TX_TIMEOUT)) { + // @sh-cha: should we rebroadcast pending txs? or rasing monitoring alert? panic(fmt.Errorf("something wrong, pending txs are not processed for a long time; current block time: %s, pending tx processing time: %s", block.Block.Time.String(), pendingTxTime.String())) } } @@ -184,12 +188,11 @@ func (n *Node) handleEvent(blockHeight uint64, latestHeight uint64, event abcity if n.eventHandlers[event.GetType()] == nil { return nil } - n.logger.Debug("handle event", zap.Uint64("height", blockHeight), zap.String("type", event.GetType())) - err := n.eventHandlers[event.Type](nodetypes.EventHandlerArgs{ + n.logger.Debug("handle event", zap.Uint64("height", blockHeight), zap.String("type", event.GetType())) + return n.eventHandlers[event.Type](nodetypes.EventHandlerArgs{ BlockHeight: blockHeight, LatestHeight: latestHeight, EventAttributes: event.GetAttributes(), }) - return err } diff --git a/node/query.go b/node/query.go index 88d9ba1..1c8e6bd 100644 --- a/node/query.go +++ b/node/query.go @@ -141,13 +141,16 @@ func GetHeightFromMetadata(md metadata.MD) (int64, error) { return 0, nil } -func GetQueryContext(height uint64) context.Context { +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete: +func GetQueryContext(height uint64) (context.Context, context.CancelFunc) { // TODO: configurable timeout timeout := 10 * time.Second - ctx, _ := context.WithTimeout(context.Background(), timeout) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + strHeight := strconv.FormatUint(height, 10) ctx = metadata.AppendToOutgoingContext(ctx, grpctypes.GRPCBlockHeightHeader, strHeight) - return ctx + return ctx, cancel } func (n *Node) QueryRawCommit(height int64) ([]byte, error) { diff --git a/node/tx.go b/node/tx.go index bba6095..9779101 100644 --- a/node/tx.go +++ b/node/tx.go @@ -14,7 +14,6 @@ import ( comettypes "github.com/cometbft/cometbft/types" "github.com/cosmos/cosmos-sdk/client/tx" "github.com/cosmos/cosmos-sdk/crypto/keyring" - "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" cryptotypes 
"github.com/cosmos/cosmos-sdk/crypto/types" sdk "github.com/cosmos/cosmos-sdk/types" txtypes "github.com/cosmos/cosmos-sdk/types/tx" @@ -65,8 +64,7 @@ func (n *Node) txBroadcastLooper(ctx context.Context) error { func (n *Node) handleMsgError(err error) error { if accountSeqRegex.FindStringSubmatch(err.Error()) != nil { // account sequence mismatched - // TODO: not panic, but handle mismatched sequence - // panic(err) + // TODO: handle mismatched sequence return err } @@ -84,7 +82,8 @@ func (n *Node) handleMsgError(err error) error { n.logger.Warn("ignoring error", zap.String("error", err.Error())) return nil } - panic(err) + + return err } for _, e := range ignoringErrors { @@ -93,14 +92,14 @@ func (n *Node) handleMsgError(err error) error { return nil } } + // n.logger.Error("failed to handle processed msgs", zap.String("error", err.Error())) - // TODO: not panic, handle error - panic(err) + return err } func (n *Node) handleProcessedMsgs(ctx context.Context, data nodetypes.ProcessedMsgs) error { sequence := n.txf.Sequence() - txBytes, err := n.buildMessages(ctx, data.Msgs) + txBytes, err := n.buildTxWithMessages(ctx, data.Msgs) if err != nil { return sdkerrors.Wrapf(err, "simulation failed") } @@ -116,12 +115,14 @@ func (n *Node) handleProcessedMsgs(ctx context.Context, data nodetypes.Processed n.logger.Debug("broadcast tx", zap.String("tx_hash", TxHash(txBytes)), zap.Uint64("sequence", sequence)) + // @sh-cha: maybe we should use data.Save? if data.Timestamp != 0 { err = n.deleteProcessedMsgs(data.Timestamp) if err != nil { return err } } + n.txf = n.txf.WithSequence(n.txf.Sequence() + 1) pendingTx := nodetypes.PendingTxInfo{ ProcessedHeight: n.GetHeight(), @@ -131,22 +132,30 @@ func (n *Node) handleProcessedMsgs(ctx context.Context, data nodetypes.Processed Timestamp: data.Timestamp, Save: data.Save, } + + // save pending transaction to the database for handling after restart err = n.savePendingTx(sequence, pendingTx) if err != nil { return err } - n.appendLocalPendingTx(pendingTx) + + // save pending tx to local memory to handle this tx in this session + n.enqueueLocalPendingTx(pendingTx) + return nil } +// BroadcastTxSync broadcasts transaction bytes to txBroadcastLooper. func (n *Node) BroadcastMsgs(msgs nodetypes.ProcessedMsgs) { if n.txChannel == nil || !n.HasKey() { return } + n.txChannel <- msgs } -func (n *Node) buildMessages( +// buildTxWithMessages creates a transaction from the given messages. +func (n *Node) buildTxWithMessages( ctx context.Context, msgs []sdk.Msg, ) ( @@ -184,14 +193,20 @@ func (n *Node) calculateGas(ctx context.Context, txf tx.Factory, msgs ...sdk.Msg return txtypes.SimulateResponse{}, 0, err } - txBytes, err := buildSimTx(keyInfo, txf, msgs...) + txBytes, err := n.buildSimTx(keyInfo, txf, msgs...) + if err != nil { + return txtypes.SimulateResponse{}, 0, err + } + + simReq := txtypes.SimulateRequest{TxBytes: txBytes} + reqBytes, err := simReq.Marshal() if err != nil { return txtypes.SimulateResponse{}, 0, err } simQuery := abci.RequestQuery{ Path: "/cosmos.tx.v1beta1.Service/Simulate", - Data: txBytes, + Data: reqBytes, } res, err := n.QueryABCI(ctx, simQuery) @@ -225,14 +240,13 @@ func (n *Node) adjustEstimatedGas(gasUsed uint64) (uint64, error) { // BuildSimTx creates an unsigned tx with an empty single signature and returns // the encoded transaction or an error if the unsigned transaction cannot be built. 
-func buildSimTx(info *keyring.Record, txf tx.Factory, msgs ...sdk.Msg) ([]byte, error) {
+func (n Node) buildSimTx(info *keyring.Record, txf tx.Factory, msgs ...sdk.Msg) ([]byte, error) {
 	txb, err := txf.BuildUnsignedTx(msgs...)
 	if err != nil {
 		return nil, err
 	}
 
-	var pk cryptotypes.PubKey = &secp256k1.PubKey{} // use default public key type
-
+	var pk cryptotypes.PubKey
 	pk, err = info.GetPubKey()
 	if err != nil {
 		return nil, err
@@ -251,43 +265,31 @@ func buildSimTx(info *keyring.Record, txf tx.Factory, msgs ...sdk.Msg) ([]byte,
 		return nil, err
 	}
 
-	protoProvider, ok := txb.(protoTxProvider)
-	if !ok {
-		return nil, fmt.Errorf("cannot simulate amino tx")
-	}
-
-	simReq := txtypes.SimulateRequest{Tx: protoProvider.GetProtoTx()}
-	return simReq.Marshal()
-}
-
-// protoTxProvider is a type which can provide a proto transaction. It is a
-// workaround to get access to the wrapper TxBuilder's method GetProtoTx().
-type protoTxProvider interface {
-	GetProtoTx() *txtypes.Tx
+	return n.EncodeTx(txb.GetTx())
 }
 
-func (n *Node) appendLocalPendingTx(tx nodetypes.PendingTxInfo) {
+func (n *Node) enqueueLocalPendingTx(tx nodetypes.PendingTxInfo) {
 	n.pendingTxMu.Lock()
 	defer n.pendingTxMu.Unlock()
 
 	n.pendingTxs = append(n.pendingTxs, tx)
 }
 
-func (n *Node) getLocalPendingTx() nodetypes.PendingTxInfo {
+func (n *Node) peekLocalPendingTx() nodetypes.PendingTxInfo {
 	n.pendingTxMu.Lock()
 	defer n.pendingTxMu.Unlock()
 
 	return n.pendingTxs[0]
 }
 
-func (n *Node) localPendingTxLength() int {
+func (n *Node) lenLocalPendingTx() int {
 	n.pendingTxMu.Lock()
 	defer n.pendingTxMu.Unlock()
 
 	return len(n.pendingTxs)
 }
 
-func (n *Node) deleteLocalPendingTx() {
+func (n *Node) dequeueLocalPendingTx() {
 	n.pendingTxMu.Lock()
 	defer n.pendingTxMu.Unlock()
 
diff --git a/node/types/config.go b/node/types/config.go
index 4cf98aa..7b6a85d 100644
--- a/node/types/config.go
+++ b/node/types/config.go
@@ -1,8 +1,13 @@
 package types
 
 type NodeConfig struct {
-	RPC      string `json:"rpc"`
-	ChainID  string `json:"chain_id"`
+	RPC     string `json:"rpc"`
+	ChainID string `json:"chain_id"`
+
+	// Mnemonic is the mnemonic phrase for the bot account.
+	//
+	// If you do not want to provide a mnemonic, you can leave this field empty;
+	// the bot will then skip tx submission.
 	Mnemonic string `json:"mnemonic"`
 	GasPrice string `json:"gas_price"`
 }
diff --git a/node/types/db.go b/node/types/db.go
index ffbc2e8..2cf5477 100644
--- a/node/types/db.go
+++ b/node/types/db.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/cosmos/cosmos-sdk/codec"
 	sdk "github.com/cosmos/cosmos-sdk/types"
 )
 
@@ -14,7 +15,11 @@ type PendingTxInfo struct {
 	Tx              []byte `json:"tx"`
 	TxHash          string `json:"tx_hash"`
 	Timestamp       int64  `json:"timestamp"`
-	Save            bool   `json:"save"`
+
+	// Save is true if the pending tx should be kept until it is processed.
+	// Save is false if the pending tx can be discarded even if it is not
+	// processed, such as an oracle tx.
+	Save bool `json:"save"`
 }
 
 func (p PendingTxInfo) Marshal() ([]byte, error) {
@@ -33,15 +38,56 @@ func (p PendingTxInfo) String() string {
 type ProcessedMsgs struct {
 	Msgs      []sdk.Msg `json:"msgs"`
 	Timestamp int64     `json:"timestamp"`
-	Save      bool      `json:"save"`
+
+	// Save is true if the processed msgs should be kept until they are processed.
+	// Save is false if the processed msgs can be discarded even if they are not
+	// processed, such as oracle msgs.
+	Save bool `json:"save"`
 }
 
-func (p ProcessedMsgs) Marshal() ([]byte, error) {
-	return json.Marshal(&p)
+// processedMsgsJSON is a helper struct to JSON encode ProcessedMsgs
+type processedMsgsJSON struct {
+	Msgs      []string `json:"msgs"`
+	Timestamp int64    `json:"timestamp"`
+	Save      bool     `json:"save"`
 }
 
-func (p *ProcessedMsgs) Unmarshal(data []byte) error {
-	return json.Unmarshal(data, p)
+func (p ProcessedMsgs) MarshalInterfaceJSON(cdc codec.Codec) ([]byte, error) {
+	pms := processedMsgsJSON{
+		Msgs:      make([]string, len(p.Msgs)),
+		Timestamp: p.Timestamp,
+		Save:      p.Save,
+	}
+
+	for i, msg := range p.Msgs {
+		bz, err := cdc.MarshalInterfaceJSON(msg)
+		if err != nil {
+			return nil, err
+		}
+
+		pms.Msgs[i] = string(bz)
+	}
+
+	return json.Marshal(&pms)
+}
+
+func (p *ProcessedMsgs) UnmarshalInterfaceJSON(cdc codec.Codec, data []byte) error {
+	var pms processedMsgsJSON
+	if err := json.Unmarshal(data, &pms); err != nil {
+		return err
+	}
+
+	p.Timestamp = pms.Timestamp
+	p.Save = pms.Save
+
+	p.Msgs = make([]sdk.Msg, len(pms.Msgs))
+	for i, msgStr := range pms.Msgs {
+		if err := cdc.UnmarshalInterfaceJSON([]byte(msgStr), &p.Msgs[i]); err != nil {
+			return err
+		}
+	}
+
+	return nil
 }
 
 func (p ProcessedMsgs) String() string {
diff --git a/node/types/key.go b/node/types/key.go
index 7e569f3..c88d120 100644
--- a/node/types/key.go
+++ b/node/types/key.go
@@ -5,15 +5,10 @@ import (
 )
 
 var (
+	// Keys
 	LastProcessedBlockHeightKey = []byte("last_processed_block_height")
-
-	PendingTxsKey = []byte("pending_txs")
-	// be used to iterate all pending txs
-	LastPendingTxKey = append(PendingTxsKey, dbtypes.Splitter+1)
-
-	ProcessedMsgsKey = []byte("processed_msgs")
-	// be used to iterate all processed msgs
-	LastProcessedMsgsKey = append(ProcessedMsgsKey, dbtypes.Splitter+1)
+	PendingTxsKey               = []byte("pending_txs")
+	ProcessedMsgsKey            = []byte("processed_msgs")
 )
 
 func PrefixedPendingTx(sequence uint64) []byte {
diff --git a/server/server.go b/server/server.go
index 7efa954..74e5fe1 100644
--- a/server/server.go
+++ b/server/server.go
@@ -12,8 +12,8 @@ func NewServer() *Server {
 	}
 }
 
-func (s *Server) Start() error {
-	return s.Listen(":3000")
+func (s *Server) Start(address string) error {
+	return s.Listen(address)
 }
 
 func (s *Server) RegisterQuerier(path string, fn func(c *fiber.Ctx) error) {
diff --git a/types/db.go b/types/db.go
index 81f4856..537a7cd 100644
--- a/types/db.go
+++ b/types/db.go
@@ -1,18 +1,25 @@
 package types
 
+// KV is a key-value pair whose key is prefixed before being stored.
 type KV struct {
 	Key   []byte
 	Value []byte
 }
 
+// RawKV is a key-value pair whose key is stored as-is, without prefixing.
+type RawKV struct {
+	Key   []byte
+	Value []byte
+}
+
 type DB interface {
 	Get([]byte) ([]byte, error)
 	Set([]byte, []byte) error
-	RawBatchSet(...KV) error
+	RawBatchSet(...RawKV) error
 	BatchSet(...KV) error
 	Delete([]byte) error
 	Close() error
-	PrefixedIterate([]byte, func([]byte, []byte) bool) error
+	PrefixedIterate([]byte, func([]byte, []byte) (bool, error)) error
 	SeekPrevInclusiveKey([]byte, []byte) ([]byte, []byte, error)
 	WithPrefix([]byte) DB
 	PrefixedKey([]byte) []byte
diff --git a/version/command.go b/version/command.go
new file mode 100644
index 0000000..230e0c0
--- /dev/null
+++ b/version/command.go
@@ -0,0 +1,63 @@
+package version
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/spf13/cobra"
+	"gopkg.in/yaml.v2"
+)
+
+const (
+	flagLong   = "long"
+	flagOutput = "output"
+)
+
+// NewVersionCommand returns a CLI command to print the application binary version information.
+// Note: to also expose extra version info on the command context,
+// the following can be added to initRootCmd to include the extraInfo field:
+//
+//	cmdContext := context.WithValue(context.Background(), version.ContextKey{}, extraInfo)
+//	rootCmd.SetContext(cmdContext)
+func NewVersionCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "version",
+		Short: "Print the application binary version information",
+		Args:  cobra.NoArgs,
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			verInfo := NewInfo()
+
+			if long, _ := cmd.Flags().GetBool(flagLong); !long {
+				fmt.Fprintln(cmd.OutOrStdout(), verInfo.Version)
+				return nil
+			}
+
+			var (
+				bz  []byte
+				err error
+			)
+
+			output, _ := cmd.Flags().GetString(flagOutput)
+			switch strings.ToLower(output) {
+			case "json":
+				bz, err = json.Marshal(verInfo)
+
+			default:
+				bz, err = yaml.Marshal(&verInfo)
+			}
+
+			if err != nil {
+				return err
+			}
+
+			fmt.Fprintln(cmd.OutOrStdout(), string(bz))
+			return nil
+		},
+	}
+
+	cmd.Flags().Bool(flagLong, false, "Print long version information")
+	cmd.Flags().StringP(flagOutput, "o", "text", "Output format (text|json)")
+
+	return cmd
+}
diff --git a/version/version.go b/version/version.go
new file mode 100644
index 0000000..6d2a9d6
--- /dev/null
+++ b/version/version.go
@@ -0,0 +1,21 @@
+package version
+
+var (
+	// Version is the current version of the bot
+	Version = ""
+
+	// GitCommit is the commit hash of the bot
+	GitCommit = ""
+)
+
+type Info struct {
+	Version   string `json:"version" yaml:"version"`
+	GitCommit string `json:"git_commit" yaml:"git-commit"`
+}
+
+func NewInfo() Info {
+	return Info{
+		Version:   Version,
+		GitCommit: GitCommit,
+	}
+}
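A few usage sketches for the APIs changed above follow; they are illustrative only and not part of the patch. First, GetQueryContext now returns a context.CancelFunc alongside the context, so callers must release it. A minimal sketch, assumed to live in the node package and to pass the context to QueryABCI the same way calculateGas does above; the function name exampleQueryWithCancel and the req parameter are hypothetical:

// Illustrative only: the call/cancel pattern callers should follow with the
// new GetQueryContext signature.
func exampleQueryWithCancel(n *Node, height uint64, req abci.RequestQuery) error {
	ctx, cancel := GetQueryContext(height)
	// cancel releases the timeout's resources as soon as the query returns
	defer cancel()

	_, err := n.QueryABCI(ctx, req)
	return err
}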
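ProcessedMsgs now serializes through codec-aware helpers because sdk.Msg is an interface type that plain encoding/json cannot round-trip. A hedged sketch of the round trip, assumed to sit in the node/types package with a codec whose interface registry knows the concrete msg types; the function name exampleRoundTrip is hypothetical:

// Illustrative only: encode and decode ProcessedMsgs with an interface-aware codec.
func exampleRoundTrip(cdc codec.Codec, msgs []sdk.Msg, timestamp int64) ([]sdk.Msg, error) {
	in := ProcessedMsgs{Msgs: msgs, Timestamp: timestamp, Save: true}

	bz, err := in.MarshalInterfaceJSON(cdc)
	if err != nil {
		return nil, err
	}

	var out ProcessedMsgs
	if err := out.UnmarshalInterfaceJSON(cdc, bz); err != nil {
		return nil, err
	}

	return out.Msgs, nil
}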
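The DB interface's PrefixedIterate callback now returns (bool, error), so an iteration can abort with an error rather than only a stop flag. A sketch against the updated interface, assumed to sit in the types package; treating the bool as a stop flag is an assumption (its semantics are not shown in this diff), and countByPrefix is a hypothetical helper:

// Illustrative only: count entries under a prefix with the new callback signature.
func countByPrefix(db DB, prefix []byte) (int, error) {
	count := 0
	err := db.PrefixedIterate(prefix, func(key, value []byte) (bool, error) {
		count++
		return false, nil // assumed: false continues iteration, true stops early
	})
	return count, err
}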
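Finally, the new version command only needs to be attached to the application's root cobra command; Version and GitCommit are typically injected at build time with -ldflags "-X ...". A wiring sketch, assumed to live in the version package so NewVersionCommand resolves locally; the registerVersionCommand name and the rootCmd parameter are hypothetical:

// Illustrative only: attach the version command to an existing root command.
func registerVersionCommand(rootCmd *cobra.Command) {
	rootCmd.AddCommand(NewVersionCommand())
}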